From 63e9a4ae6828b76b7d8cd3c647669f584a66080e Mon Sep 17 00:00:00 2001 From: Shirou WAKAYAMA Date: Mon, 5 Oct 2015 11:07:41 +0900 Subject: [PATCH] Fix godeps for MQTT output and remove hostname setting Closes #241 --- CHANGELOG.md | 1 + Godeps/Godeps.json | 45 +- .../.gitignore | 36 + .../CONTRIBUTING.md | 69 + .../DISTRIBUTION | 15 + .../org.eclipse.paho.mqtt.golang.git/LICENSE | 87 + .../README.md | 62 + .../about.html | 41 + .../client.go | 517 + .../components.go | 31 + .../org.eclipse.paho.mqtt.golang.git/edl-v10 | 15 + .../org.eclipse.paho.mqtt.golang.git/epl-v10 | 70 + .../filestore.go | 258 + .../fvt/README.md | 74 + .../fvt/mosquitto.cfg | 17 + .../fvt/rsmb.cfg | 8 + .../fvt/setup_IMA.sh | 111 + .../fvt_client_test.go | 1007 + .../fvt_store_test.go | 496 + .../fvt_test.go | 26 + .../memstore.go | 119 + .../message.go | 104 + .../messageids.go | 61 + .../org.eclipse.paho.mqtt.golang.git/net.go | 275 + .../net_test.go | 17 + .../notice.html | 108 + .../org.eclipse.paho.mqtt.golang.git/oops.go | 27 + .../options.go | 270 + .../packets/connack.go | 57 + .../packets/connect.go | 128 + .../packets/disconnect.go | 44 + .../packets/packets.go | 324 + .../packets/packets_test.go | 159 + .../packets/pingreq.go | 44 + .../packets/pingresp.go | 44 + .../packets/puback.go | 50 + .../packets/pubcomp.go | 50 + .../packets/publish.go | 82 + .../packets/pubrec.go | 50 + .../packets/pubrel.go | 50 + .../packets/suback.go | 58 + .../packets/subscribe.go | 68 + .../packets/unsuback.go | 50 + .../packets/unsubscribe.go | 61 + .../org.eclipse.paho.mqtt.golang.git/ping.go | 73 + .../router.go | 162 + .../samples/bug-ping.go | 18 + .../samples/build.sh | 10 + .../samples/close_bug.go | 23 + .../samples/custom_store.go | 96 + .../samples/mosquitto.conf | 745 + .../samples/pwfile.example | 3 + .../samples/routing.go | 105 + .../samples/sample.go | 130 + .../samples/samplecerts/CAfile.pem | 150 + .../samples/samplecerts/README | 9 + .../samples/samplecerts/client-crt.pem | 20 + .../samples/samplecerts/client-key.pem | 27 + .../samplecerts/intermediateCA-crt.pem | 20 + .../samplecerts/intermediateCA-key.pem | 27 + .../samples/samplecerts/mosquitto.org.crt | 18 + .../samples/samplecerts/rootCA-crt.pem | 21 + .../samples/samplecerts/rootCA-key.pem | 27 + .../samples/samplecerts/server-crt.pem | 21 + .../samples/samplecerts/server-key.pem | 27 + .../samples/sango.go | 51 + .../samples/sim.go | 35 + .../samples/sim2.go | 42 + .../samples/sim_pub.go | 130 + .../samples/simple.go | 58 + .../samples/ssl.go | 123 + .../samples/stdinpub.go | 70 + .../samples/stdoutsub.go | 85 + .../org.eclipse.paho.mqtt.golang.git/store.go | 125 + .../org.eclipse.paho.mqtt.golang.git/token.go | 156 + .../org.eclipse.paho.mqtt.golang.git/topic.go | 82 + .../org.eclipse.paho.mqtt.golang.git/trace.go | 36 + .../unit_client_test.go | 56 + .../unit_messageids_test.go | 111 + .../unit_options_test.go | 126 + .../unit_ping_test.go | 62 + .../unit_router_test.go | 287 + .../unit_store_test.go | 668 + .../unit_topic_test.go | 47 + .../github.com/influxdb/influxdb/.gitignore | 67 - .../github.com/influxdb/influxdb/CHANGELOG.md | 1607 -- .../influxdb/influxdb/CONTRIBUTING.md | 231 - .../github.com/influxdb/influxdb/DOCKER.md | 44 - .../github.com/influxdb/influxdb/Dockerfile | 24 - .../src/github.com/influxdb/influxdb/LICENSE | 20 - .../github.com/influxdb/influxdb/QUERIES.md | 180 - .../github.com/influxdb/influxdb/README.md | 71 - .../github.com/influxdb/influxdb/balancer.go | 78 - .../influxdb/influxdb/balancer_test.go | 115 - 
.../influxdb/influxdb/build-docker.sh | 7 - .../influxdb/influxdb/circle-test.sh | 63 - .../github.com/influxdb/influxdb/circle.yml | 12 - .../influxdb/influxdb/cluster/client_pool.go | 57 - .../influxdb/influxdb/cluster/config.go | 35 - .../influxdb/influxdb/cluster/config_test.go | 27 - .../influxdb/cluster/internal/data.pb.go | 286 - .../influxdb/cluster/internal/data.proto | 49 - .../influxdb/cluster/points_writer.go | 314 - .../influxdb/cluster/points_writer_test.go | 436 - .../influxdb/influxdb/cluster/rpc.go | 229 - .../influxdb/influxdb/cluster/rpc_test.go | 110 - .../influxdb/influxdb/cluster/service.go | 338 - .../influxdb/influxdb/cluster/service_test.go | 103 - .../influxdb/influxdb/cluster/shard_mapper.go | 207 - .../influxdb/cluster/shard_mapper_test.go | 96 - .../influxdb/influxdb/cluster/shard_writer.go | 163 - .../influxdb/cluster/shard_writer_test.go | 186 - .../influxdb/influxdb/cmd/influx/main.go | 724 - .../influxdb/influxdb/cmd/influx/main_test.go | 194 - .../cmd/influx_stress/influx_stress.go | 154 - .../influxdb/cmd/influxd/backup/backup.go | 170 - .../cmd/influxd/backup/backup_test.go | 125 - .../influxdb/cmd/influxd/help/help.go | 46 - .../influxdb/influxdb/cmd/influxd/main.go | 200 - .../influxdb/cmd/influxd/restore/restore.go | 250 - .../cmd/influxd/restore/restore_test.go | 155 - .../influxdb/cmd/influxd/run/command.go | 235 - .../influxdb/cmd/influxd/run/config.go | 227 - .../cmd/influxd/run/config_command.go | 73 - .../influxdb/cmd/influxd/run/config_test.go | 144 - .../influxdb/cmd/influxd/run/server.go | 536 - .../cmd/influxd/run/server_helpers_test.go | 312 - .../influxdb/cmd/influxd/run/server_test.go | 3719 ---- .../influxdb/cmd/influxd/run/server_test.md | 150 - .../influxdb/influxdb/diagnostics.go | 143 - .../github.com/influxdb/influxdb/errors.go | 78 - .../influxdb/influxdb/etc/burn-in/.rvmrc | 1 - .../influxdb/influxdb/etc/burn-in/Gemfile | 4 - .../influxdb/etc/burn-in/Gemfile.lock | 14 - .../influxdb/influxdb/etc/burn-in/burn-in.rb | 79 - .../influxdb/influxdb/etc/burn-in/log.rb | 23 - .../influxdb/etc/burn-in/random_gaussian.rb | 31 - .../influxdb/etc/burn-in/random_points.rb | 29 - .../influxdb/influxdb/etc/config.sample.toml | 246 - .../influxdb/influxdb/importer/README.md | 186 - .../influxdb/influxdb/importer/v8/importer.go | 236 - .../github.com/influxdb/influxdb/nightly.sh | 14 - .../github.com/influxdb/influxdb/package.sh | 409 - .../influxdb/scripts/influxdb.service | 16 - .../influxdb/influxdb/scripts/init.sh | 205 - .../influxdb/services/admin/config.go | 21 - .../influxdb/services/admin/config_test.go | 32 - .../influxdb/services/admin/service.go | 111 - .../influxdb/services/admin/service_test.go | 33 - .../services/collectd/collectd_test.conf | 209 - .../influxdb/services/collectd/config.go | 44 - .../influxdb/services/collectd/config_test.go | 32 - .../influxdb/services/collectd/service.go | 278 - .../services/collectd/service_test.go | 501 - .../services/collectd/test_client/README.md | 3 - .../services/collectd/test_client/client.go | 71 - .../services/continuous_querier/config.go | 65 - .../continuous_querier/config_test.go | 36 - .../continuous_querier/continuous_queries.md | 236 - .../services/continuous_querier/service.go | 458 - .../continuous_querier/service_test.go | 503 - .../influxdb/services/graphite/README.md | 125 - .../influxdb/services/graphite/config.go | 221 - .../influxdb/services/graphite/config_test.go | 164 - .../influxdb/services/graphite/parser.go | 342 - .../influxdb/services/graphite/parser_test.go | 548 - 
.../influxdb/services/graphite/service.go | 267 - .../services/graphite/service_test.go | 183 - .../influxdb/influxdb/services/hh/config.go | 44 - .../influxdb/services/hh/config_test.go | 45 - .../influxdb/influxdb/services/hh/doc.go | 5 - .../influxdb/influxdb/services/hh/limiter.go | 61 - .../influxdb/services/hh/limiter_test.go | 47 - .../influxdb/services/hh/processor.go | 218 - .../influxdb/services/hh/processor_test.go | 80 - .../influxdb/influxdb/services/hh/queue.go | 666 - .../influxdb/services/hh/queue_test.go | 327 - .../influxdb/influxdb/services/hh/service.go | 136 - .../influxdb/services/httpd/config.go | 22 - .../influxdb/services/httpd/config_test.go | 52 - .../influxdb/services/httpd/handler.go | 853 - .../influxdb/services/httpd/handler_test.go | 448 - .../services/httpd/response_logger.go | 153 - .../influxdb/services/httpd/service.go | 112 - .../influxdb/services/monitor/config.go | 25 - .../influxdb/services/monitor/monitor.go | 83 - .../influxdb/services/opentsdb/README.md | 8 - .../influxdb/services/opentsdb/config.go | 36 - .../influxdb/services/opentsdb/config_test.go | 38 - .../influxdb/services/opentsdb/handler.go | 181 - .../influxdb/services/opentsdb/service.go | 281 - .../services/opentsdb/service_test.go | 167 - .../influxdb/services/precreator/README.md | 13 - .../influxdb/services/precreator/config.go | 32 - .../services/precreator/config_test.go | 31 - .../influxdb/services/precreator/service.go | 100 - .../services/precreator/service_test.go | 59 - .../influxdb/services/retention/config.go | 16 - .../services/retention/config_test.go | 27 - .../influxdb/services/retention/service.go | 129 - .../influxdb/services/snapshotter/service.go | 145 - .../services/snapshotter/service_test.go | 1 - .../influxdb/influxdb/services/udp/config.go | 12 - .../influxdb/services/udp/config_test.go | 36 - .../influxdb/influxdb/services/udp/service.go | 159 - .../influxdb/influxdb/shared/admin/README.md | 15 - .../influxdb/shared/admin/css/admin.css | 87 - .../influxdb/shared/admin/css/bootstrap.css | 6584 ------- .../admin/css/dropdowns-enhancement.css | 294 - .../fonts/glyphicons-halflings-regular.eot | Bin 20127 -> 0 bytes .../fonts/glyphicons-halflings-regular.svg | 288 - .../fonts/glyphicons-halflings-regular.ttf | Bin 45404 -> 0 bytes .../fonts/glyphicons-halflings-regular.woff | Bin 23424 -> 0 bytes .../fonts/glyphicons-halflings-regular.woff2 | Bin 18028 -> 0 bytes .../shared/admin/img/influxdb-light400.png | Bin 19775 -> 0 bytes .../influxdb/influxdb/shared/admin/index.html | 198 - .../influxdb/shared/admin/js/admin.js | 464 - .../admin/js/vendor/bootstrap-3.3.5.min.js | 7 - .../admin/js/vendor/jquery-2.1.4.min.js | 4 - .../admin/js/vendor/react-0.13.3.min.js | 16 - .../influxdb/influxdb/statik/statik.go | 10 - .../github.com/influxdb/influxdb/tcp/mux.go | 128 - .../influxdb/influxdb/tcp/mux_test.go | 137 - .../influxdb/influxdb/tests/README.md | 4 - .../influxdb/tests/create_future_writes.sh | 22 - .../tests/create_write_multiple_query.sh | 14 - .../tests/create_write_single_query.sh | 19 - ..._with_multiple_measurements_values_tags.sh | 23 - ...e_write_single_with_multiple_tags_query.sh | 11 - .../influxdb/tests/distinct-data-scenarios.sh | 35 - .../influxdb/tests/read_write_gzip.sh | 15 - .../influxdb/influxdb/tests/siege/.gitignore | 1 - .../influxdb/influxdb/tests/siege/README.md | 66 - .../influxdb/influxdb/tests/siege/urlgen | 107 - .../influxdb/influxdb/tests/tmux/3_shards | 28 - .../influxdb/influxdb/tests/tmux/README.md | 31 - 
.../influxdb/influxdb/tests/tmux/sample.json | 16000 ---------------- .../influxdb/influxdb/tests/tmux/seed.sh | 13 - .../influxdb/tests/tmux/server_8086.toml | 7 - .../influxdb/tests/tmux/server_8087.toml | 7 - .../influxdb/tests/tmux/server_8088.toml | 7 - .../influxdb/influxdb/tests/urlgen/urlgen.go | 58 - .../github.com/influxdb/influxdb/uuid/uuid.go | 93 - .../src/github.com/pborman/uuid/CONTRIBUTORS | 1 + .../src/github.com/pborman/uuid/LICENSE | 27 + .../src/github.com/pborman/uuid/dce.go | 84 + .../src/github.com/pborman/uuid/doc.go | 8 + .../src/github.com/pborman/uuid/hash.go | 53 + .../src/github.com/pborman/uuid/json.go | 30 + .../src/github.com/pborman/uuid/json_test.go | 32 + .../src/github.com/pborman/uuid/node.go | 101 + .../src/github.com/pborman/uuid/seq_test.go | 66 + .../src/github.com/pborman/uuid/time.go | 132 + .../src/github.com/pborman/uuid/util.go | 43 + .../src/github.com/pborman/uuid/uuid.go | 163 + .../src/github.com/pborman/uuid/uuid_test.go | 390 + .../src/github.com/pborman/uuid/version1.go | 41 + .../src/github.com/pborman/uuid/version4.go | 25 + .../src/golang.org/x/net/websocket/client.go | 113 + .../x/net/websocket/exampledial_test.go | 31 + .../x/net/websocket/examplehandler_test.go | 26 + .../src/golang.org/x/net/websocket/hybi.go | 586 + .../golang.org/x/net/websocket/hybi_test.go | 608 + .../src/golang.org/x/net/websocket/server.go | 113 + .../golang.org/x/net/websocket/websocket.go | 412 + .../x/net/websocket/websocket_test.go | 587 + outputs/mqtt/mqtt.go | 26 +- 267 files changed, 13039 insertions(+), 48434 deletions(-) create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/.gitignore create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/DISTRIBUTION create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/LICENSE create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/README.md create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/about.html create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/client.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/components.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/edl-v10 create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/epl-v10 create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/filestore.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/README.md create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/mosquitto.cfg create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/rsmb.cfg create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/setup_IMA.sh create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_client_test.go create mode 100644 
Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_store_test.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_test.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/memstore.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/message.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/messageids.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net_test.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/notice.html create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/oops.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/options.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/connack.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/connect.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/disconnect.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/packets.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/packets_test.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pingreq.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pingresp.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/puback.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubcomp.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/publish.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubrec.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubrel.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/suback.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/subscribe.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/unsuback.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/unsubscribe.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/ping.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/router.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/bug-ping.go create mode 100644 
Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/build.sh create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/close_bug.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/custom_store.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/mosquitto.conf create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/pwfile.example create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/routing.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sample.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/CAfile.pem create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/README create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/client-crt.pem create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/client-key.pem create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/intermediateCA-crt.pem create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/intermediateCA-key.pem create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/mosquitto.org.crt create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/rootCA-crt.pem create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/rootCA-key.pem create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/server-crt.pem create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/server-key.pem create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sango.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim2.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim_pub.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/simple.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/ssl.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/stdinpub.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/stdoutsub.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/store.go create mode 100644 
Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/token.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/topic.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/trace.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_client_test.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_messageids_test.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_options_test.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_ping_test.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_router_test.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_store_test.go create mode 100644 Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_topic_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/CHANGELOG.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/CONTRIBUTING.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/DOCKER.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/QUERIES.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/README.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/balancer.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/balancer_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/build-docker.sh delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/circle-test.sh delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/circle.yml delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/client_pool.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.pb.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.proto delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper_test.go delete mode 100644 
Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/help/help.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/main.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/command.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_helpers_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/diagnostics.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/errors.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/.rvmrc delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile.lock delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/burn-in.rb delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/log.rb delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_gaussian.rb delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_points.rb delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/etc/config.sample.toml delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/importer/README.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/importer/v8/importer.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/nightly.sh delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/package.sh delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/influxdb.service delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/init.sh delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config_test.go delete mode 100644 
Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/collectd_test.conf delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/README.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/client.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/continuous_queries.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/service.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/service_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/README.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/config.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/config_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/parser.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/parser_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/service.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/graphite/service_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/config.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/config_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/doc.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/limiter.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/limiter_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/processor.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/processor_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/queue.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/queue_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/hh/service.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/config.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/config_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/handler.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/handler_test.go delete mode 100644 
Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/response_logger.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/httpd/service.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/monitor/config.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/monitor/monitor.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/README.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/config.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/config_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/handler.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/service.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/opentsdb/service_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/precreator/README.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/precreator/config.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/precreator/config_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/precreator/service.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/precreator/service_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/retention/config.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/retention/config_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/retention/service.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/snapshotter/service.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/snapshotter/service_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/udp/config.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/udp/config_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/services/udp/service.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/README.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/css/admin.css delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/css/bootstrap.css delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/css/dropdowns-enhancement.css delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/fonts/glyphicons-halflings-regular.eot delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/fonts/glyphicons-halflings-regular.svg delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/fonts/glyphicons-halflings-regular.ttf delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/fonts/glyphicons-halflings-regular.woff delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/fonts/glyphicons-halflings-regular.woff2 delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/img/influxdb-light400.png delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/index.html delete mode 100644 
Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/js/admin.js delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/js/vendor/bootstrap-3.3.5.min.js delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/js/vendor/jquery-2.1.4.min.js delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/shared/admin/js/vendor/react-0.13.3.min.js delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/statik/statik.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tcp/mux.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tcp/mux_test.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/README.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/create_future_writes.sh delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/create_write_multiple_query.sh delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/create_write_single_query.sh delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/create_write_single_with_multiple_measurements_values_tags.sh delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/create_write_single_with_multiple_tags_query.sh delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/distinct-data-scenarios.sh delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/read_write_gzip.sh delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/siege/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/siege/README.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/siege/urlgen delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/tmux/3_shards delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/tmux/README.md delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/tmux/sample.json delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/tmux/seed.sh delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/tmux/server_8086.toml delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/tmux/server_8087.toml delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/tmux/server_8088.toml delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/tests/urlgen/urlgen.go delete mode 100644 Godeps/_workspace/src/github.com/influxdb/influxdb/uuid/uuid.go create mode 100644 Godeps/_workspace/src/github.com/pborman/uuid/CONTRIBUTORS create mode 100644 Godeps/_workspace/src/github.com/pborman/uuid/LICENSE create mode 100644 Godeps/_workspace/src/github.com/pborman/uuid/dce.go create mode 100644 Godeps/_workspace/src/github.com/pborman/uuid/doc.go create mode 100644 Godeps/_workspace/src/github.com/pborman/uuid/hash.go create mode 100644 Godeps/_workspace/src/github.com/pborman/uuid/json.go create mode 100644 Godeps/_workspace/src/github.com/pborman/uuid/json_test.go create mode 100644 Godeps/_workspace/src/github.com/pborman/uuid/node.go create mode 100644 Godeps/_workspace/src/github.com/pborman/uuid/seq_test.go create mode 100644 Godeps/_workspace/src/github.com/pborman/uuid/time.go create mode 100644 Godeps/_workspace/src/github.com/pborman/uuid/util.go create mode 100644 Godeps/_workspace/src/github.com/pborman/uuid/uuid.go create mode 100644 
Godeps/_workspace/src/github.com/pborman/uuid/uuid_test.go create mode 100644 Godeps/_workspace/src/github.com/pborman/uuid/version1.go create mode 100644 Godeps/_workspace/src/github.com/pborman/uuid/version4.go create mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/client.go create mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/exampledial_test.go create mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/examplehandler_test.go create mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/hybi.go create mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/hybi_test.go create mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/server.go create mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/websocket.go create mode 100644 Godeps/_workspace/src/golang.org/x/net/websocket/websocket_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 728479937..0493bf153 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ - [#90](https://github.com/influxdb/telegraf/issues/90): Add Docker labels to tags in docker plugin - [#223](https://github.com/influxdb/telegraf/pull/223): Add port tag to nginx plugin. Thanks @neezgee! - [#227](https://github.com/influxdb/telegraf/pull/227): Add command intervals to exec plugin. Thanks @jpalay! +- [#241](https://github.com/influxdb/telegraf/pull/241): MQTT Output. Thanks @shirou! - Memory plugin: cached and buffered measurements re-added - Logging: additional logging for each collection interval, track the number of metrics collected and from how many plugins. diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 17f50ed5c..e25f5f895 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -5,6 +5,11 @@ "./..." ], "Deps": [ + { + "ImportPath": "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git", + "Comment": "v0.9.1-14-g546c47a", + "Rev": "546c47a6d0e9492e77f6f37473d59c36a708e08b" + }, { "ImportPath": "github.com/Shopify/sarama", "Comment": "v1.4.3-45-g5b18996", @@ -96,7 +101,32 @@ "Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee" }, { - "ImportPath": "github.com/influxdb/influxdb", + "ImportPath": "github.com/influxdb/influxdb/client", + "Comment": "v0.9.3", + "Rev": "5d42b212fca8facfe9db0c83822f09b88be643ec" + }, + { + "ImportPath": "github.com/influxdb/influxdb/influxql", + "Comment": "v0.9.3", + "Rev": "5d42b212fca8facfe9db0c83822f09b88be643ec" + }, + { + "ImportPath": "github.com/influxdb/influxdb/meta", + "Comment": "v0.9.3", + "Rev": "5d42b212fca8facfe9db0c83822f09b88be643ec" + }, + { + "ImportPath": "github.com/influxdb/influxdb/snapshot", + "Comment": "v0.9.3", + "Rev": "5d42b212fca8facfe9db0c83822f09b88be643ec" + }, + { + "ImportPath": "github.com/influxdb/influxdb/toml", + "Comment": "v0.9.3", + "Rev": "5d42b212fca8facfe9db0c83822f09b88be643ec" + }, + { + "ImportPath": "github.com/influxdb/influxdb/tsdb", "Comment": "v0.9.3", "Rev": "5d42b212fca8facfe9db0c83822f09b88be643ec" }, @@ -117,6 +147,10 @@ "ImportPath": "github.com/naoina/toml", "Rev": "5811abcabb29d6af0fdf060f96d328962bd3cd5e" }, + { + "ImportPath": "github.com/pborman/uuid", + "Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4" + }, { "ImportPath": "github.com/prometheus/client_golang/extraction", "Comment": "0.7.0-22-gbbd006b", @@ -215,6 +249,10 @@ "ImportPath": "golang.org/x/crypto/blowfish", "Rev": "173ce04bfaf66c7bb0fa9d5c0bfd93e773909dbd" }, + { + "ImportPath": "golang.org/x/net/websocket", + "Rev": "db8e4de5b2d6653f66aea53094624468caad15d2" + }, { "ImportPath": 
"gopkg.in/dancannon/gorethink.v1", "Comment": "v1.x.x", @@ -224,11 +262,6 @@ "ImportPath": "gopkg.in/mgo.v2", "Comment": "r2015.06.03-3-g3569c88", "Rev": "3569c88678d88179dcbd68d02ab081cbca3cd4d0" - }, - { - "ImportPath": "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git", - "Comment": "v0.9.1-14-g546c47a", - "Rev": "546c47a6d0e9492e77f6f37473d59c36a708e08b" } ] } diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/.gitignore b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/.gitignore new file mode 100644 index 000000000..47bb0de48 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/.gitignore @@ -0,0 +1,36 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +*.msg +*.lok + +samples/trivial +samples/trivial2 +samples/sample +samples/reconnect +samples/ssl +samples/custom_store +samples/simple +samples/stdinpub +samples/stdoutsub +samples/routing \ No newline at end of file diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/CONTRIBUTING.md b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/CONTRIBUTING.md new file mode 100644 index 000000000..ce3771955 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/CONTRIBUTING.md @@ -0,0 +1,69 @@ +Contributing to Paho +==================== + +Thanks for your interest in this project. + +Project description: +-------------------- + +The Paho project has been created to provide scalable open-source implementations of open and standard messaging protocols aimed at new, existing, and emerging applications for Machine-to-Machine (M2M) and Internet of Things (IoT). +Paho reflects the inherent physical and cost constraints of device connectivity. Its objectives include effective levels of decoupling between devices and applications, designed to keep markets open and encourage the rapid growth of scalable Web and Enterprise middleware and applications. Paho is being kicked off with MQTT publish/subscribe client implementations for use on embedded platforms, along with corresponding server support as determined by the community. + +- https://projects.eclipse.org/projects/technology.paho + +Developer resources: +-------------------- + +Information regarding source code management, builds, coding standards, and more. + +- https://projects.eclipse.org/projects/technology.paho/developer + +Contributor License Agreement: +------------------------------ + +Before your contribution can be accepted by the project, you need to create and electronically sign the Eclipse Foundation Contributor License Agreement (CLA). 
+ +- http://www.eclipse.org/legal/CLA.php + +Contributing Code: +------------------ + +The Go client uses git with Gerrit for code review. Use the following URL for Gerrit access; + +ssh://<username>@git.eclipse.org:29418/paho/org.eclipse.paho.mqtt.golang + +Configure a remote called review to push your changes to; + +git config remote.review.url ssh://<username>@git.eclipse.org:29418/paho/org.eclipse.paho.mqtt.golang +git config remote.review.push HEAD:refs/for/<branch> + +When you have made and committed a change you can push it to Gerrit for review with; + +git push review + +See https://wiki.eclipse.org/Gerrit for more details on how Gerrit is used in Eclipse; https://wiki.eclipse.org/Gerrit#Gerrit_Code_Review_Cheatsheet has some particularly useful information. + +Git commit messages should follow the style described here; + +http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html + +Contact: +-------- + +Contact the project developers via the project's "dev" list. + +- https://dev.eclipse.org/mailman/listinfo/paho-dev + +Search for bugs: +---------------- + +This project uses Bugzilla to track ongoing development and issues. + +- https://bugs.eclipse.org/bugs/buglist.cgi?product=Paho&component=MQTT-Go + +Create a new bug: +----------------- + +Be sure to search for existing bugs before you create another one. Remember that contributions are always welcome! + +- https://bugs.eclipse.org/bugs/enter_bug.cgi?product=Paho diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/DISTRIBUTION b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/DISTRIBUTION new file mode 100644 index 000000000..34e49731d --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/DISTRIBUTION @@ -0,0 +1,15 @@ + + +Eclipse Distribution License - v 1.0 + +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
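A note on the Godeps.json hunk near the top of this patch: each dependency entry pins an import path to an exact commit (Rev), optionally tagged with a human-readable Comment, and the change replaces the single github.com/influxdb/influxdb entry with entries for just the subpackages telegraf imports (client, influxql, meta, snapshot, toml, tsdb), all pinned to the same revision. The sketch below renders that schema as Go types; the field set is inferred from what is visible in the hunk, not taken from the godep tool's source, so treat it as illustrative.

```go
// Sketch: decode Godeps/Godeps.json and list each pinned dependency.
// Field names (ImportPath, Comment, Rev per entry; Packages and Deps at
// the top level) are inferred from the hunk above; godep's real schema
// may carry additional fields.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

type dependency struct {
	ImportPath string // package path as imported by the project
	Comment    string // optional human-readable tag, e.g. "v0.9.3"
	Rev        string // exact commit the import path is pinned to
}

type godeps struct {
	ImportPath string
	Packages   []string
	Deps       []dependency
}

func main() {
	f, err := os.Open("Godeps/Godeps.json")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	var g godeps
	if err := json.NewDecoder(f).Decode(&g); err != nil {
		log.Fatal(err)
	}
	for _, d := range g.Deps {
		fmt.Printf("%s  %s  %s\n", d.Rev, d.ImportPath, d.Comment)
	}
}
```

Splitting the influxdb dependency this way vendors only the packages the build actually needs, which is where most of the 48,434 deleted lines in the diffstat come from.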
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/LICENSE b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/LICENSE new file mode 100644 index 000000000..aa7cc810f --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/LICENSE @@ -0,0 +1,87 @@ +Eclipse Public License - v 1.0 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + +a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and + +b) in the case of each subsequent Contributor: + +i) changes to the Program, and + +ii) additions to the Program; + +where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. + +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. + +"Program" means the Contributions distributed in accordance with this Agreement. + +"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. + +2. GRANT OF RIGHTS + +a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. + +b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. + +c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. 
For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. + +d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. + +3. REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: + +a) it complies with the terms and conditions of this Agreement; and + +b) its license agreement: + +i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; + +ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; + +iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and + +iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. + +When the Program is made available in source code form: + +a) it must be made available under this Agreement; and + +b) a copy of this Agreement must be included with each copy of the Program. + +Contributors may not remove or alter any copyright notices contained within the Program. + +Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. 
If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. 
The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. + +This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. \ No newline at end of file diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/README.md b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/README.md new file mode 100644 index 000000000..cc26f0759 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/README.md @@ -0,0 +1,62 @@ +Eclipse Paho MQTT Go client +=========================== + + +This repository contains the source code for the [Eclipse Paho](http://eclipse.org/paho) MQTT Go client library. + +This code builds a library which enables applications to connect to an [MQTT](http://mqtt.org) broker to publish messages, and to subscribe to topics and receive published messages. + +This library supports a fully asynchronous mode of operation. + + +Installation and Build +---------------------- + +This client is designed to work with the standard Go tools, so installation is as easy as: + +``` +go get git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git +``` + +The client depends on Google's [websockets](http://godoc.org/code.google.com/p/go.net/websocket) package, +also easily installed with the command: + +``` +go get code.google.com/p/go.net/websocket +``` + + +Usage and API +------------- + +Detailed API documentation is available by using the godoc tool, or can be browsed online +using the [godoc.org](http://godoc.org/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git) service. + +Make use of the library by importing it in your Go client source code. For example, +``` +import MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" +``` + +Samples are available in the `/samples` directory for reference. + + +Runtime tracing +--------------- + +Tracing is enabled by assigning loggers (from the Go log package) to the logging endpoints ERROR, CRITICAL, WARN and DEBUG; a wiring sketch is shown below. + + +Reporting bugs +-------------- + +Please report bugs under the "MQTT-Go" Component in [Eclipse Bugzilla](http://bugs.eclipse.org/bugs/) for the Paho Technology project. This is a very new library as of Q1 2014, so there are sure to be bugs. + + +More information +---------------- + +Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list](https://dev.eclipse.org/mailman/listinfo/paho-dev).
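+
+As a concrete sketch of the tracing hookup described under "Runtime tracing" above: assuming the four endpoints are exported `*log.Logger` variables (as the trace calls in this client's source suggest), they can be wired up as follows; the prefixes and output streams are illustrative only.
+
+```
+package main
+
+import (
+	"log"
+	"os"
+
+	MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
+)
+
+func main() {
+	// Route library trace output to the standard streams; assign only
+	// the levels you want to see.
+	MQTT.ERROR = log.New(os.Stderr, "[ERROR] ", log.LstdFlags)
+	MQTT.CRITICAL = log.New(os.Stderr, "[CRIT] ", log.LstdFlags)
+	MQTT.WARN = log.New(os.Stderr, "[WARN] ", log.LstdFlags)
+	MQTT.DEBUG = log.New(os.Stdout, "[DEBUG] ", log.LstdFlags)
+
+	// ... construct and use the client as usual ...
+}
+```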
+ +General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt). + +There is much more information available via the [MQTT community site](http://mqtt.org). diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/about.html b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/about.html new file mode 100644 index 000000000..b183f417a --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/about.html @@ -0,0 +1,41 @@ + + + +About + + +

About This Content

+ +

December 9, 2013

+

License

+ +

The Eclipse Foundation makes available all content in this plug-in ("Content"). Unless otherwise +indicated below, the Content is provided to you under the terms and conditions of the +Eclipse Public License Version 1.0 ("EPL") and Eclipse Distribution License Version 1.0 ("EDL"). +A copy of the EPL is available at +http://www.eclipse.org/legal/epl-v10.html +and a copy of the EDL is available at +http://www.eclipse.org/org/documents/edl-v10.php. +For purposes of the EPL, "Program" will mean the Content.

+ +

If you did not receive this Content directly from the Eclipse Foundation, the Content is +being redistributed by another party ("Redistributor") and different terms and conditions may +apply to your use of any object code in the Content. Check the Redistributor's license that was +provided with the Content. If no such license exists, contact the Redistributor. Unless otherwise +indicated below, the terms and conditions of the EPL still apply to any source code in the Content +and such source code may be obtained at http://www.eclipse.org.

+ + +

Third Party Content

+

The Content includes items that have been sourced from third parties as set out below. If you + did not receive this Content directly from the Eclipse Foundation, the following is provided + for informational purposes only, and you should look to the Redistributor's license for + terms and conditions of use.

+

+ None

+

+

+ + + + diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/client.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/client.go new file mode 100644 index 000000000..1e5fd39a4 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/client.go @@ -0,0 +1,517 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +// Package mqtt provides an MQTT v3.1.1 client library. +package mqtt + +import ( + "errors" + "fmt" + "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" + "net" + "sync" + "time" +) + +// ClientInt is the interface definition for a Client as used by this +// library, the interface is primarily to allow mocking tests. +type ClientInt interface { + IsConnected() bool + Connect() Token + Disconnect(uint) + disconnect() + Publish(string, byte, bool, interface{}) Token + Subscribe(string, byte, MessageHandler) Token + SubscribeMultiple(map[string]byte, MessageHandler) Token + Unsubscribe(...string) Token +} + +// Client is an MQTT v3.1.1 client for communicating +// with an MQTT server using non-blocking methods that allow work +// to be done in the background. +// An application may connect to an MQTT server using: +// A plain TCP socket +// A secure SSL/TLS socket +// A websocket +// To enable ensured message delivery at Quality of Service (QoS) levels +// described in the MQTT spec, a message persistence mechanism must be +// used. This is done by providing a type which implements the Store +// interface. For convenience, FileStore and MemoryStore are provided +// implementations that should be sufficient for most use cases. More +// information can be found in their respective documentation. +// Numerous connection options may be specified by configuring a +// and then supplying a ClientOptions type. +type Client struct { + sync.RWMutex + messageIds + conn net.Conn + ibound chan packets.ControlPacket + obound chan *PacketAndToken + oboundP chan *PacketAndToken + msgRouter *router + stopRouter chan bool + incomingPubChan chan *packets.PublishPacket + errors chan error + stop chan struct{} + persist Store + options ClientOptions + lastContact lastcontact + pingOutstanding bool + connected bool + workers sync.WaitGroup +} + +// NewClient will create an MQTT v3.1.1 client with all of the options specified +// in the provided ClientOptions. The client must have the Start method called +// on it before it may be used. This is to make sure resources (such as a net +// connection) are created before the application is actually ready. 
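+// A minimal usage sketch (the broker URL and client ID are illustrative;
+// note that in this version of the library the connect step is the Connect
+// method shown below, not a Start method):
+//
+//   opts := NewClientOptions().AddBroker("tcp://localhost:1883").SetClientID("sketch")
+//   c := NewClient(opts)
+//   if token := c.Connect(); token.Wait() && token.Error() != nil {
+//       // handle the connection error
+//   }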
+func NewClient(o *ClientOptions) *Client { + c := &Client{} + c.options = *o + + if c.options.Store == nil { + c.options.Store = NewMemoryStore() + } + switch c.options.ProtocolVersion { + case 3, 4: + c.options.protocolVersionExplicit = true + default: + c.options.ProtocolVersion = 4 + c.options.protocolVersionExplicit = false + } + c.persist = c.options.Store + c.connected = false + c.messageIds = messageIds{index: make(map[uint16]Token)} + c.msgRouter, c.stopRouter = newRouter() + c.msgRouter.setDefaultHandler(c.options.DefaultPublishHander) + return c +} + +// IsConnected returns a bool signifying whether +// the client is connected or not. +func (c *Client) IsConnected() bool { + c.RLock() + defer c.RUnlock() + return c.connected +} + +func (c *Client) setConnected(status bool) { + c.Lock() + defer c.Unlock() + c.connected = status +} + +//ErrNotConnected is the error returned from function calls that are +//made when the client is not connected to a broker +var ErrNotConnected = errors.New("Not Connected") + +// Connect will create a connection to the message broker +// If clean session is false, then a slice will +// be returned containing Receipts for all messages +// that were in-flight at the last disconnect. +// If clean session is true, then any existing client +// state will be removed. +func (c *Client) Connect() Token { + var err error + t := newToken(packets.Connect).(*ConnectToken) + DEBUG.Println(CLI, "Connect()") + + go func() { + var rc byte + cm := newConnectMsgFromOptions(&c.options) + + for _, broker := range c.options.Servers { + CONN: + DEBUG.Println(CLI, "about to write new connect msg") + c.conn, err = openConnection(broker, &c.options.TLSConfig, c.options.ConnectTimeout) + if err == nil { + DEBUG.Println(CLI, "socket connected to broker") + switch c.options.ProtocolVersion { + case 3: + DEBUG.Println(CLI, "Using MQTT 3.1 protocol") + cm.ProtocolName = "MQIsdp" + cm.ProtocolVersion = 3 + default: + DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol") + c.options.ProtocolVersion = 4 + cm.ProtocolName = "MQTT" + cm.ProtocolVersion = 4 + } + cm.Write(c.conn) + + rc = c.connect() + if rc != packets.Accepted { + c.conn.Close() + c.conn = nil + //if the protocol version was explicitly set don't do any fallback + if c.options.protocolVersionExplicit { + ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not CONN_ACCEPTED, but rather", packets.ConnackReturnCodes[rc]) + continue + } + if c.options.ProtocolVersion == 4 { + DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol") + c.options.ProtocolVersion = 3 + goto CONN + } + } + break + } else { + ERROR.Println(CLI, err.Error()) + WARN.Println(CLI, "failed to connect to broker, trying next") + rc = packets.ErrNetworkError + } + } + + if c.conn == nil { + ERROR.Println(CLI, "Failed to connect to a broker") + t.returnCode = rc + if rc != packets.ErrNetworkError { + t.err = packets.ConnErrors[rc] + } else { + t.err = fmt.Errorf("%s : %s", packets.ConnErrors[rc], err) + } + t.flowComplete() + return + } + + c.lastContact.update() + c.persist.Open() + + c.obound = make(chan *PacketAndToken, 100) + c.oboundP = make(chan *PacketAndToken, 100) + c.ibound = make(chan packets.ControlPacket) + c.errors = make(chan error) + c.stop = make(chan struct{}) + + c.incomingPubChan = make(chan *packets.PublishPacket, 100) + c.msgRouter.matchAndDispatch(c.incomingPubChan, c.options.Order, c) + + c.workers.Add(1) + go outgoing(c) + go alllogic(c) + + c.connected = true + DEBUG.Println(CLI, "client is connected") + if 
c.options.OnConnect != nil { + go c.options.OnConnect(c) + } + + if c.options.KeepAlive != 0 { + c.workers.Add(1) + go keepalive(c) + } + + // Take care of any messages in the store + //var leftovers []Receipt + if c.options.CleanSession == false { + //leftovers = c.resume() + } else { + c.persist.Reset() + } + + // Do not start incoming until resume has completed + c.workers.Add(1) + go incoming(c) + + DEBUG.Println(CLI, "exit startClient") + t.flowComplete() + }() + return t +} + +// internal function used to reconnect the client when it loses its connection +func (c *Client) reconnect() { + DEBUG.Println(CLI, "enter reconnect") + var rc byte = 1 + var sleep uint = 1 + var err error + + for rc != 0 { + cm := newConnectMsgFromOptions(&c.options) + + for _, broker := range c.options.Servers { + CONN: + DEBUG.Println(CLI, "about to write new connect msg") + c.conn, err = openConnection(broker, &c.options.TLSConfig, c.options.ConnectTimeout) + if err == nil { + DEBUG.Println(CLI, "socket connected to broker") + switch c.options.ProtocolVersion { + case 3: + DEBUG.Println(CLI, "Using MQTT 3.1 protocol") + cm.ProtocolName = "MQIsdp" + cm.ProtocolVersion = 3 + default: + DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol") + c.options.ProtocolVersion = 4 + cm.ProtocolName = "MQTT" + cm.ProtocolVersion = 4 + } + cm.Write(c.conn) + + rc = c.connect() + if rc != packets.Accepted { + c.conn.Close() + c.conn = nil + //if the protocol version was explicitly set don't do any fallback + if c.options.protocolVersionExplicit { + ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not Accepted, but rather", packets.ConnackReturnCodes[rc]) + continue + } + if c.options.ProtocolVersion == 4 { + DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol") + c.options.ProtocolVersion = 3 + goto CONN + } + } + break + } else { + ERROR.Println(CLI, err.Error()) + WARN.Println(CLI, "failed to connect to broker, trying next") + rc = packets.ErrNetworkError + } + } + if rc != 0 { + DEBUG.Println(CLI, "Reconnect failed, sleeping for", sleep, "seconds") + time.Sleep(time.Duration(sleep) * time.Second) + if sleep <= uint(c.options.MaxReconnectInterval.Seconds()) { + sleep *= 2 + } + } + } + + c.lastContact.update() + c.stop = make(chan struct{}) + + c.workers.Add(1) + go outgoing(c) + go alllogic(c) + + c.setConnected(true) + DEBUG.Println(CLI, "client is reconnected") + if c.options.OnConnect != nil { + go c.options.OnConnect(c) + } + + if c.options.KeepAlive != 0 { + c.workers.Add(1) + go keepalive(c) + } + c.workers.Add(1) + go incoming(c) +} + +// This function is only used for receiving a connack +// when the connection is first started. +// This prevents receiving incoming data while resume +// is in progress if clean session is false. +func (c *Client) connect() byte { + DEBUG.Println(NET, "connect started") + + ca, err := packets.ReadPacket(c.conn) + if err != nil { + ERROR.Println(NET, "connect got error", err) + //c.errors <- err + return packets.ErrNetworkError + } + msg := ca.(*packets.ConnackPacket) + + if msg == nil || msg.FixedHeader.MessageType != packets.Connack { + ERROR.Println(NET, "received msg that was nil or not CONNACK") + } else { + DEBUG.Println(NET, "received connack") + } + return msg.ReturnCode +} + +// Disconnect will end the connection with the server, but not before waiting +// the specified number of milliseconds to wait for existing work to be +// completed. 
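+// For example, c.Disconnect(250) gives in-flight work up to 250 milliseconds
+// (via the token WaitTimeout call below) before the connection is torn down.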
+func (c *Client) Disconnect(quiesce uint) { + if !c.IsConnected() { + WARN.Println(CLI, "already disconnected") + return + } + DEBUG.Println(CLI, "disconnecting") + c.setConnected(false) + + dm := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket) + dt := newToken(packets.Disconnect) + c.oboundP <- &PacketAndToken{p: dm, t: dt} + + // wait for work to finish, or quiesce time consumed + dt.WaitTimeout(time.Duration(quiesce) * time.Millisecond) + c.disconnect() +} + +// ForceDisconnect will end the connection with the mqtt broker immediately. +func (c *Client) forceDisconnect() { + if !c.IsConnected() { + WARN.Println(CLI, "already disconnected") + return + } + c.setConnected(false) + c.conn.Close() + DEBUG.Println(CLI, "forcefully disconnecting") + c.disconnect() +} + +func (c *Client) internalConnLost(err error) { + close(c.stop) + c.conn.Close() + c.workers.Wait() + if c.IsConnected() { + if c.options.OnConnectionLost != nil { + go c.options.OnConnectionLost(c, err) + } + if c.options.AutoReconnect { + go c.reconnect() + } else { + c.setConnected(false) + } + } +} + +func (c *Client) disconnect() { + select { + case <-c.stop: + //someone else has already closed the channel, must be error + default: + close(c.stop) + } + c.conn.Close() + c.workers.Wait() + close(c.stopRouter) + DEBUG.Println(CLI, "disconnected") + c.persist.Close() +} + +// Publish will publish a message with the specified QoS +// and content to the specified topic. +// Returns a read only channel used to track +// the delivery of the message. +func (c *Client) Publish(topic string, qos byte, retained bool, payload interface{}) Token { + token := newToken(packets.Publish).(*PublishToken) + DEBUG.Println(CLI, "enter Publish") + if !c.IsConnected() { + token.err = ErrNotConnected + token.flowComplete() + return token + } + pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pub.Qos = qos + pub.TopicName = topic + pub.Retain = retained + switch payload.(type) { + case string: + pub.Payload = []byte(payload.(string)) + case []byte: + pub.Payload = payload.([]byte) + default: + token.err = errors.New("Unknown payload type") + token.flowComplete() + return token + } + + DEBUG.Println(CLI, "sending publish message, topic:", topic) + c.obound <- &PacketAndToken{p: pub, t: token} + return token +} + +// Subscribe starts a new subscription. Provide a MessageHandler to be executed when +// a message is published on the topic provided. +func (c *Client) Subscribe(topic string, qos byte, callback MessageHandler) Token { + token := newToken(packets.Subscribe).(*SubscribeToken) + DEBUG.Println(CLI, "enter Subscribe") + if !c.IsConnected() { + token.err = ErrNotConnected + token.flowComplete() + return token + } + sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket) + if err := validateTopicAndQos(topic, qos); err != nil { + token.err = err + return token + } + sub.Topics = append(sub.Topics, topic) + sub.Qoss = append(sub.Qoss, qos) + DEBUG.Println(sub.String()) + + if callback != nil { + c.msgRouter.addRoute(topic, callback) + } + + token.subs = append(token.subs, topic) + c.oboundP <- &PacketAndToken{p: sub, t: token} + DEBUG.Println(CLI, "exit Subscribe") + return token +} + +// SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to +// be executed when a message is published on one of the topics provided. 
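+// A usage sketch (the topic filters and handler are illustrative):
+//
+//   filters := map[string]byte{"sensors/#": 0, "control/reply": 1}
+//   token := c.SubscribeMultiple(filters, handler)
+//   token.Wait()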
+func (c *Client) SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token { + var err error + token := newToken(packets.Subscribe).(*SubscribeToken) + DEBUG.Println(CLI, "enter SubscribeMultiple") + if !c.IsConnected() { + token.err = ErrNotConnected + token.flowComplete() + return token + } + sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket) + if sub.Topics, sub.Qoss, err = validateSubscribeMap(filters); err != nil { + token.err = err + return token + } + + if callback != nil { + for topic := range filters { + c.msgRouter.addRoute(topic, callback) + } + } + token.subs = make([]string, len(sub.Topics)) + copy(token.subs, sub.Topics) + c.oboundP <- &PacketAndToken{p: sub, t: token} + DEBUG.Println(CLI, "exit SubscribeMultiple") + return token +} + +// Unsubscribe will end the subscription from each of the topics provided. +// Messages published to those topics from other clients will no longer be +// received. +func (c *Client) Unsubscribe(topics ...string) Token { + token := newToken(packets.Unsubscribe).(*UnsubscribeToken) + DEBUG.Println(CLI, "enter Unsubscribe") + if !c.IsConnected() { + token.err = ErrNotConnected + token.flowComplete() + return token + } + unsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket) + unsub.Topics = make([]string, len(topics)) + copy(unsub.Topics, topics) + + c.oboundP <- &PacketAndToken{p: unsub, t: token} + for _, topic := range topics { + c.msgRouter.deleteRoute(topic) + } + + DEBUG.Println(CLI, "exit Unsubscribe") + return token +} + +//DefaultConnectionLostHandler is a definition of a function that simply +//reports to the DEBUG log the reason for the client losing a connection. +func DefaultConnectionLostHandler(client *Client, reason error) { + DEBUG.Println("Connection lost:", reason.Error()) +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/components.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/components.go new file mode 100644 index 000000000..01f5fafdf --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/components.go @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +type component string + +// Component names for debug output +const ( + NET component = "[net] " + PNG component = "[pinger] " + CLI component = "[client] " + DEC component = "[decode] " + MES component = "[message] " + STR component = "[store] " + MID component = "[msgids] " + TST component = "[test] " + STA component = "[state] " + ERR component = "[error] " +) diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/edl-v10 b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/edl-v10 new file mode 100644 index 000000000..cf989f145 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/edl-v10 @@ -0,0 +1,15 @@ + +Eclipse Distribution License - v 1.0 + +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/epl-v10 b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/epl-v10 new file mode 100644 index 000000000..79e486c3d --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/epl-v10 @@ -0,0 +1,70 @@ +Eclipse Public License - v 1.0 + +THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + +a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and +b) in the case of each subsequent Contributor: +i) changes to the Program, and +ii) additions to the Program; +where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program. +"Contributor" means any person or entity that distributes the Program. + +"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program. + +"Program" means the Contributions distributed in accordance with this Agreement. + +"Recipient" means anyone who receives the Program under this Agreement, including all Contributors. + +2. 
GRANT OF RIGHTS + +a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form. +b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder. +c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program. +d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement. +3. REQUIREMENTS + +A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that: + +a) it complies with the terms and conditions of this Agreement; and +b) its license agreement: +i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose; +ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits; +iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and +iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange. +When the Program is made available in source code form: + +a) it must be made available under this Agreement; and +b) a copy of this Agreement must be included with each copy of the Program. +Contributors may not remove or alter any copyright notices contained within the Program. 
+ +Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations. + +6. 
DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. + +If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved. + +This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation. 
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/filestore.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/filestore.go new file mode 100644 index 000000000..c4a0c8b91 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/filestore.go @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" + "io" + "io/ioutil" + "os" + "path" + "sync" +) + +const ( + msgExt = ".msg" + bkpExt = ".bkp" +) + +// FileStore implements the store interface using the filesystem to provide +// true persistence, even across client failure. This is designed to use a +// single directory per running client. If you are running multiple clients +// on the same filesystem, you will need to be careful to specify unique +// store directories for each. +type FileStore struct { + sync.RWMutex + directory string + opened bool +} + +// NewFileStore will create a new FileStore which stores its messages in the +// directory provided. +func NewFileStore(directory string) *FileStore { + store := &FileStore{ + directory: directory, + opened: false, + } + return store +} + +// Open will allow the FileStore to be used. +func (store *FileStore) Open() { + store.Lock() + defer store.Unlock() + // if no store directory was specified in ClientOpts, by default use the + // current working directory + if store.directory == "" { + store.directory, _ = os.Getwd() + } + + // if store dir exists, great, otherwise, create it + if !exists(store.directory) { + perms := os.FileMode(0770) + merr := os.MkdirAll(store.directory, perms) + chkerr(merr) + } + store.opened = true + DEBUG.Println(STR, "store is opened at", store.directory) +} + +// Close will disallow the FileStore from being used. +func (store *FileStore) Close() { + store.Lock() + defer store.Unlock() + store.opened = false + WARN.Println(STR, "store is not open") +} + +// Put will put a message into the store, associated with the provided +// key value. +func (store *FileStore) Put(key string, m packets.ControlPacket) { + store.Lock() + defer store.Unlock() + chkcond(store.opened) + full := fullpath(store.directory, key) + if exists(full) { + backup(store.directory, key) // make a copy of what already exists + defer unbackup(store.directory, key) + } + write(store.directory, key, m) + chkcond(exists(full)) +} + +// Get will retrieve a message from the store, the one associated with +// the provided key value. +func (store *FileStore) Get(key string) packets.ControlPacket { + store.RLock() + defer store.RUnlock() + chkcond(store.opened) + filepath := fullpath(store.directory, key) + if !exists(filepath) { + return nil + } + mfile, oerr := os.Open(filepath) + chkerr(oerr) + //all, rerr := ioutil.ReadAll(mfile) + //chkerr(rerr) + msg, rerr := packets.ReadPacket(mfile) + chkerr(rerr) + cerr := mfile.Close() + chkerr(cerr) + return msg +} + +// All will provide a list of all of the keys associated with messages +// currenly residing in the FileStore. 
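+// (Keys are the stored file names with the ".msg" extension stripped; see
+// all and write below.)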
+func (store *FileStore) All() []string { + store.RLock() + defer store.RUnlock() + return store.all() +} + +// Del will remove the persisted message associated with the provided +// key from the FileStore. +func (store *FileStore) Del(key string) { + store.Lock() + defer store.Unlock() + store.del(key) +} + +// Reset will remove all persisted messages from the FileStore. +func (store *FileStore) Reset() { + store.Lock() + defer store.Unlock() + WARN.Println(STR, "FileStore Reset") + for _, key := range store.all() { + store.del(key) + } +} + +// lockless +func (store *FileStore) all() []string { + chkcond(store.opened) + keys := []string{} + files, rderr := ioutil.ReadDir(store.directory) + chkerr(rderr) + for _, f := range files { + DEBUG.Println(STR, "file in All():", f.Name()) + key := f.Name()[0 : len(f.Name())-4] // remove file extension + keys = append(keys, key) + } + return keys +} + +// lockless +func (store *FileStore) del(key string) { + chkcond(store.opened) + DEBUG.Println(STR, "store del filepath:", store.directory) + DEBUG.Println(STR, "store delete key:", key) + filepath := fullpath(store.directory, key) + DEBUG.Println(STR, "path of deletion:", filepath) + if !exists(filepath) { + WARN.Println(STR, "store could not delete key:", key) + return + } + rerr := os.Remove(filepath) + chkerr(rerr) + DEBUG.Println(STR, "del msg:", key) + chkcond(!exists(filepath)) +} + +func fullpath(store string, key string) string { + p := path.Join(store, key+msgExt) + return p +} + +func bkppath(store string, key string) string { + p := path.Join(store, key+bkpExt) + return p +} + +// create file called "X.[messageid].msg" located in the store +// the contents of the file is the bytes of the message +// if a message with m's message id already exists, it will +// be overwritten +// X will be 'i' for inbound messages, and O for outbound messages +func write(store, key string, m packets.ControlPacket) { + filepath := fullpath(store, key) + f, err := os.Create(filepath) + chkerr(err) + werr := m.Write(f) + chkerr(werr) + cerr := f.Close() + chkerr(cerr) +} + +func exists(file string) bool { + if _, err := os.Stat(file); err != nil { + if os.IsNotExist(err) { + return false + } + chkerr(err) + } + return true +} + +func backup(store, key string) { + bkpp := bkppath(store, key) + fulp := fullpath(store, key) + backup, err := os.Create(bkpp) + chkerr(err) + mfile, oerr := os.Open(fulp) + chkerr(oerr) + _, cerr := io.Copy(backup, mfile) + chkerr(cerr) + clberr := backup.Close() + chkerr(clberr) + clmerr := mfile.Close() + chkerr(clmerr) +} + +// Identify .bkp files in the store and turn them into .msg files, +// whether or not it overwrites an existing file. This is safe because +// I'm copying the Paho Java client and they say it is. 
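+// Note that nothing in this file calls restore automatically; it is a
+// recovery helper for a failure between backup and unbackup in Put.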
+func restore(store string) { + files, rderr := ioutil.ReadDir(store) + chkerr(rderr) + for _, f := range files { + fname := f.Name() + if len(fname) > 4 { + if fname[len(fname)-4:] == bkpExt { + key := fname[0 : len(fname)-4] + fulp := fullpath(store, key) + msg, cerr := os.Create(fulp) + chkerr(cerr) + bkpp := path.Join(store, fname) + bkp, oerr := os.Open(bkpp) + chkerr(oerr) + n, cerr := io.Copy(msg, bkp) + chkerr(cerr) + chkcond(n > 0) + clmerr := msg.Close() + chkerr(clmerr) + clberr := bkp.Close() + chkerr(clberr) + remerr := os.Remove(bkpp) + chkerr(remerr) + } + } + } +} + +func unbackup(store, key string) { + bkpp := bkppath(store, key) + remerr := os.Remove(bkpp) + chkerr(remerr) +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/README.md b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/README.md new file mode 100644 index 000000000..17790426f --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/README.md @@ -0,0 +1,74 @@ +FVT Instructions +================ + +The FVT tests are currently only supported by [IBM MessageSight](http://www-03.ibm.com/software/products/us/en/messagesight/). + +Support for [mosquitto](http://mosquitto.org/) and [IBM Really Small Message Broker](https://www.ibm.com/developerworks/community/groups/service/html/communityview?communityUuid=d5bedadd-e46f-4c97-af89-22d65ffee070) might be added in the future. + + +IBM MessageSight Configuration +------------------------------ + +The IBM MessageSight Virtual Appliance can be downloaded here: +[Download](http://www-933.ibm.com/support/fixcentral/swg/selectFixes?parent=ibm~Other+software&product=ibm/Other+software/MessageSight&function=fixId&fixids=1.0.0.1-IMA-DeveloperImage&includeSupersedes=0 "IBM MessageSight") + +There is a nice blog post about it here: +[Blog](https://www.ibm.com/developerworks/community/blogs/c565c720-fe84-4f63-873f-607d87787327/entry/ibm_messagesight_for_developers_is_here?lang=en "Blog") + + +The virtual appliance must be installed into a virtual machine like +Oracle VirtualBox or VMWare Player. (Follow the instructions that come +with the download.) + +Next, copy your authorized keys (basically a file containing the public +RSA key of your own computer) onto the appliance to enable passwordless SSH. + +For example, + + Console> user sshkey add "scp://user@host:~/.ssh/authorized_keys" + +More information can be found in the IBM MessageSight InfoCenter: +[InfoCenter](https://infocenters.hursley.ibm.com/ism/v1/help/index.jsp "InfoCenter") + +Now, execute the script setup_IMA.sh to create the objects necessary +to configure the server for the unit test cases provided. + +For example, + + ./setup_IMA.sh + +You should now be able to view the objects on your server: + + Console> imaserver show Endpoint Name=GoMqttEP1 + Name = GoMqttEP1 + Enabled = True + Port = 17001 + Protocol = MQTT + Interface = all + SecurityProfile = + ConnectionPolicies = GoMqttCP1 + MessagingPolicies = GoMqttMP1 + MaxMessageSize = 1024KB + MessageHub = GoMqttTestHub + Description = + + + +RSMB Configuration +------------------ +Wait for SSL support? + + +Mosquitto Configuration +----------------------- +Launch mosquitto from the fvt directory, specifying mosquitto.cfg as the config file + +``ex: /usr/bin/mosquitto -c ./mosquitto.cfg`` + +Note: Mosquitto requires SSL v1.1 or better, while Go 1.1.2 supports +only SSL v1.0. However, Go 1.2+ supports SSL v1.1 and SSL v1.2.
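+
+Given the SSL/TLS version note above, a client built with Go 1.2+ can pin the
+minimum protocol version in the configuration it passes to the client's
+SetTlsConfig option (a sketch using crypto/tls; pair it with your own
+certificate settings):
+
+    conf := &tls.Config{MinVersion: tls.VersionTLS11}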
+ + +Other Notes +----------- +Go 1.1.2 does not support intermediate certificates; Go 1.2+ does. diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/mosquitto.cfg b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/mosquitto.cfg new file mode 100644 index 000000000..cddb94f31 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/mosquitto.cfg @@ -0,0 +1,17 @@ +allow_anonymous true +allow_duplicate_messages false +connection_messages true +log_dest stdout +log_timestamp true +log_type all +persistence false +bind_address 127.0.0.1 + +listener 17001 +listener 17002 +listener 17003 +listener 17004 + +#capath ../samples/samplecerts +#certfile ../samples/samplecerts/server-crt.pem +#keyfile ../samples/samplecerts/server-key.pem diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/rsmb.cfg b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/rsmb.cfg new file mode 100644 index 000000000..1dd77547b --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/rsmb.cfg @@ -0,0 +1,8 @@ +allow_anonymous false +bind_address 127.0.0.1 +connection_messages true +log_level detail + +listener 17001 +#listener 17003 +#listener 17004 \ No newline at end of file diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/setup_IMA.sh b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/setup_IMA.sh new file mode 100644 index 000000000..6ebdda3c2 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt/setup_IMA.sh @@ -0,0 +1,111 @@ +#!/bin/bash + +####################################################################### +# This script is for configuring your IBM Messaging Appliance for use # +# as an mqtt test server for testing the go-mqtt open source client. # +# It creates the Policies and Endpoints necessary to test particular # +# features of the client, such as IPv6, SSL, and other things # +# # +# You do not need this script for any other purpose. 
# +####################################################################### + +# Edit options to match your configuration +IMA_HOST=9.41.55.184 +IMA_USER=admin +HOST=9.41.55.146 +USER=root +CERTDIR=~/GO/src/github.com/shoenig/go-mqtt/samples/samplecerts + +echo 'Configuring your IBM Messaging Appliance for testing go-mqtt' +echo 'IMA_HOST: ' $IMA_HOST + + +function ima { + reply=`ssh $IMA_USER@$IMA_HOST imaserver $@` +} + +function imp { + reply=`ssh $IMA_USER@$IMA_HOST file get $@` +} + +ima create MessageHub Name=GoMqttTestHub + +# Config "1" is a basic, open endpoint, port 17001 +ima create MessagingPolicy \ + Name=GoMqttMP1 \ + Protocol=MQTT \ + ActionList=Publish,Subscribe \ + MaxMessages=100000 \ + DestinationType=Topic \ + Destination=* + +ima create ConnectionPolicy \ + Name=GoMqttCP1 \ + Protocol=MQTT + +ima create Endpoint \ + Name=GoMqttEP1 \ + Protocol=MQTT \ + MessageHub=GoMqttTestHub \ + ConnectionPolicies=GoMqttCP1 \ + MessagingPolicies=GoMqttMP1 \ + Port=17001 + +# Config "2" is IPv6 only , port 17002 + +# Config "3" is for authorization failures, port 17003 +ima create ConnectionPolicy \ + Name=GoMqttCP2 \ + Protocol=MQTT \ + ClientID=GoMqttClient + +ima create Endpoint \ + Name=GoMqttEP3 \ + Protocol=MQTT \ + MessageHub=GoMqttTestHub \ + ConnectionPolicies=GoMqttCP2 \ + MessagingPolicies=GoMqttMP1 \ + Port=17003 + +# Config "4" is secure connections, port 17004 +imp scp://$USER@$HOST:${CERTDIR}/server-crt.pem . +imp scp://$USER@$HOST:${CERTDIR}/server-key.pem . +imp scp://$USER@$HOST:${CERTDIR}/rootCA-crt.pem . +imp scp://$USER@$HOST:${CERTDIR}/intermediateCA-crt.pem . + +ima apply Certificate \ + CertFileName=server-crt.pem \ + "CertFilePassword=" \ + KeyFileName=server-key.pem \ + "KeyFilePassword=" + +ima create CertificateProfile \ + Name=GoMqttCertProf \ + Certificate=server-crt.pem \ + Key=server-key.pem + +ima create SecurityProfile \ + Name=GoMqttSecProf \ + MinimumProtocolMethod=SSLv3 \ + UseClientCertificate=True \ + UsePasswordAuthentication=False \ + Ciphers=Fast \ + CertificateProfile=GoMqttCertProf + +ima apply Certificate \ + TrustedCertificate=rootCA-crt.pem \ + SecurityProfileName=GoMqttSecProf + +ima apply Certificate \ + TrustedCertificate=intermediateCA-crt.pem \ + SecurityProfileName=GoMqttSecProf + +ima create Endpoint \ + Name=GoMqttEP4 \ + Port=17004 \ + MessageHub=GoMqttTestHub \ + ConnectionPolicies=GoMqttCP1 \ + MessagingPolicies=GoMqttMP1 \ + SecurityProfile=GoMqttSecProf \ + Protocol=MQTT + diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_client_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_client_test.go new file mode 100644 index 000000000..8a914ceab --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_client_test.go @@ -0,0 +1,1007 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import "fmt" +import "time" +import "bytes" + +import "io/ioutil" +import "crypto/tls" +import "crypto/x509" +import "testing" + +func Test_Start(t *testing.T) { + ops := NewClientOptions().SetClientID("Start"). + AddBroker(FVTTCP). 
+ SetStore(NewFileStore("/tmp/fvt/Start")) + c := NewClient(ops) + + token := c.Connect() + if token.Wait() && token.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", token.Error()) + } + + c.Disconnect(250) +} + +/* uncomment this if you have connection policy disallowing FailClientID +func Test_InvalidConnRc(t *testing.T) { + ops := NewClientOptions().SetClientID("FailClientID"). + AddBroker("tcp://" + FVT_IP + ":17003"). + SetStore(NewFileStore("/tmp/fvt/InvalidConnRc")) + + c := NewClient(ops) + _, err := c.Connect() + if err != ErrNotAuthorized { + t.Fatalf("Did not receive error as expected, got %v", err) + } + c.Disconnect(250) +} +*/ + +// Helper function for Test_Start_Ssl +func NewTLSConfig() *tls.Config { + certpool := x509.NewCertPool() + pemCerts, err := ioutil.ReadFile("samples/samplecerts/CAfile.pem") + if err == nil { + certpool.AppendCertsFromPEM(pemCerts) + } + + cert, err := tls.LoadX509KeyPair("samples/samplecerts/client-crt.pem", "samples/samplecerts/client-key.pem") + if err != nil { + panic(err) + } + + return &tls.Config{ + RootCAs: certpool, + ClientAuth: tls.NoClientCert, + ClientCAs: nil, + InsecureSkipVerify: true, + Certificates: []tls.Certificate{cert}, + } +} + +/* uncomment this if you have ssl setup +func Test_Start_Ssl(t *testing.T) { + tlsconfig := NewTlsConfig() + ops := NewClientOptions().SetClientID("StartSsl"). + AddBroker(FVT_SSL). + SetStore(NewFileStore("/tmp/fvt/Start_Ssl")). + SetTlsConfig(tlsconfig) + + c := NewClient(ops) + + _, err := c.Connect() + if err != nil { + t.Fatalf("Error on Client.Connect(): %v", err) + } + + c.Disconnect(250) +} +*/ + +func Test_Publish_1(t *testing.T) { + ops := NewClientOptions() + ops.AddBroker(FVTTCP) + ops.SetClientID("Publish_1") + ops.SetStore(NewFileStore("/tmp/fvt/Publish_1")) + + c := NewClient(ops) + token := c.Connect() + if token.Wait() && token.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", token.Error()) + } + + c.Publish("test/Publish", 0, false, "Publish qo0") + + c.Disconnect(250) +} + +func Test_Publish_2(t *testing.T) { + ops := NewClientOptions() + ops.AddBroker(FVTTCP) + ops.SetClientID("Publish_2") + ops.SetStore(NewFileStore("/tmp/fvt/Publish_2")) + + c := NewClient(ops) + token := c.Connect() + if token.Wait() && token.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", token.Error()) + } + + c.Publish("/test/Publish", 0, false, "Publish1 qos0") + c.Publish("/test/Publish", 0, false, "Publish2 qos0") + + c.Disconnect(250) +} + +func Test_Publish_3(t *testing.T) { + ops := NewClientOptions() + ops.AddBroker(FVTTCP) + ops.SetClientID("Publish_3") + ops.SetStore(NewFileStore("/tmp/fvt/Publish_3")) + + c := NewClient(ops) + token := c.Connect() + if token.Wait() && token.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", token.Error()) + } + + c.Publish("/test/Publish", 0, false, "Publish1 qos0") + c.Publish("/test/Publish", 1, false, "Publish2 qos1") + c.Publish("/test/Publish", 2, false, "Publish2 qos2") + + c.Disconnect(250) +} + +func Test_Subscribe(t *testing.T) { + pops := NewClientOptions() + pops.AddBroker(FVTTCP) + pops.SetClientID("Subscribe_tx") + pops.SetStore(NewFileStore("/tmp/fvt/Subscribe/p")) + p := NewClient(pops) + + sops := NewClientOptions() + sops.AddBroker(FVTTCP) + sops.SetClientID("Subscribe_rx") + sops.SetStore(NewFileStore("/tmp/fvt/Subscribe/s")) + var f MessageHandler = func(client *Client, msg Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) + } + 
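+	// Messages arriving on subscriptions with no specific route registered
+	// (the Subscribe call below passes a nil handler) are delivered to this
+	// default handler.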
sops.SetDefaultPublishHandler(f) + s := NewClient(sops) + + sToken := s.Connect() + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) + } + + s.Subscribe("/test/sub", 0, nil) + + pToken := p.Connect() + if pToken.Wait() && pToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) + } + + p.Publish("/test/sub", 0, false, "Publish qos0") + + p.Disconnect(250) + s.Disconnect(250) +} + +func Test_Will(t *testing.T) { + willmsgc := make(chan string) + + sops := NewClientOptions().AddBroker(FVTTCP) + sops.SetClientID("will-giver") + sops.SetWill("/wills", "good-byte!", 0, false) + sops.SetConnectionLostHandler(func(client *Client, err error) { + fmt.Println("OnConnectionLost!") + }) + c := NewClient(sops) + + wops := NewClientOptions() + wops.AddBroker(FVTTCP) + wops.SetClientID("will-subscriber") + wops.SetStore(NewFileStore("/tmp/fvt/Will")) + wops.SetDefaultPublishHandler(func(client *Client, msg Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) + willmsgc <- string(msg.Payload()) + }) + wsub := NewClient(wops) + + wToken := wsub.Connect() + if wToken.Wait() && wToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", wToken.Error()) + } + + wsub.Subscribe("/wills", 0, nil) + + token := c.Connect() + if token.Wait() && token.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", token.Error()) + } + time.Sleep(time.Duration(1) * time.Second) + + c.forceDisconnect() + + wsub.Disconnect(250) + + if <-willmsgc != "good-byte!" { + t.Fatalf("will message did not have correct payload") + } +} + +func Test_Binary_Will(t *testing.T) { + willmsgc := make(chan []byte) + will := []byte{ + 0xDE, + 0xAD, + 0xBE, + 0xEF, + } + + sops := NewClientOptions().AddBroker(FVTTCP) + sops.SetClientID("will-giver") + sops.SetBinaryWill("/wills", will, 0, false) + sops.SetConnectionLostHandler(func(client *Client, err error) { + }) + c := NewClient(sops) + + wops := NewClientOptions().AddBroker(FVTTCP) + wops.SetClientID("will-subscriber") + wops.SetStore(NewFileStore("/tmp/fvt/Binary_Will")) + wops.SetDefaultPublishHandler(func(client *Client, msg Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %v\n", msg.Payload()) + willmsgc <- msg.Payload() + }) + wsub := NewClient(wops) + + wToken := wsub.Connect() + if wToken.Wait() && wToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", wToken.Error()) + } + + wsub.Subscribe("/wills", 0, nil) + + token := c.Connect() + if token.Wait() && token.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", token.Error()) + } + time.Sleep(time.Duration(1) * time.Second) + + c.forceDisconnect() + + wsub.Disconnect(250) + + if !bytes.Equal(<-willmsgc, will) { + t.Fatalf("will message did not have correct payload") + } +} + +/** +"[...] a publisher is responsible for determining the maximum QoS a +message can be delivered at, but a subscriber is able to downgrade +the QoS to one more suitable for its usage. +The QoS of a message is never upgraded." 
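+
+In practice the effective QoS for delivery to a subscriber is therefore
+min(publish QoS, subscribe QoS); the nine publisher/subscriber combinations
+tested below cover that matrix.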
+**/ + +/*********************************** + * Tests to cover the 9 QoS combos * + ***********************************/ + +func wait(c chan bool) { + fmt.Println("choke is waiting") + <-c +} + +// Pub 0, Sub 0 + +func Test_p0s0(t *testing.T) { + store := "/tmp/fvt/p0s0" + topic := "/test/p0s0" + choke := make(chan bool) + + pops := NewClientOptions() + pops.AddBroker(FVTTCP) + pops.SetClientID("p0s0-pub") + pops.SetStore(NewFileStore(store + "/p")) + p := NewClient(pops) + + sops := NewClientOptions() + sops.AddBroker(FVTTCP) + sops.SetClientID("p0s0-sub") + sops.SetStore(NewFileStore(store + "/s")) + var f MessageHandler = func(client *Client, msg Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) + choke <- true + } + sops.SetDefaultPublishHandler(f) + + s := NewClient(sops) + sToken := s.Connect() + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) + } + + sToken = s.Subscribe(topic, 0, nil) + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) + } + + pToken := p.Connect() + if pToken.Wait() && pToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) + } + p.Publish(topic, 0, false, "p0s0 payload 1") + p.Publish(topic, 0, false, "p0s0 payload 2") + + wait(choke) + wait(choke) + + p.Publish(topic, 0, false, "p0s0 payload 3") + wait(choke) + + p.Disconnect(250) + s.Disconnect(250) + + chkcond(isemptydir(store + "/p")) + chkcond(isemptydir(store + "/s")) +} + +// Pub 0, Sub 1 + +func Test_p0s1(t *testing.T) { + store := "/tmp/fvt/p0s1" + topic := "/test/p0s1" + choke := make(chan bool) + + pops := NewClientOptions() + pops.AddBroker(FVTTCP) + pops.SetClientID("p0s1-pub") + pops.SetStore(NewFileStore(store + "/p")) + p := NewClient(pops) + + sops := NewClientOptions() + sops.AddBroker(FVTTCP) + sops.SetClientID("p0s1-sub") + sops.SetStore(NewFileStore(store + "/s")) + var f MessageHandler = func(client *Client, msg Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) + choke <- true + } + sops.SetDefaultPublishHandler(f) + + s := NewClient(sops) + sToken := s.Connect() + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) + } + + sToken = s.Subscribe(topic, 1, nil) + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) + } + + pToken := p.Connect() + if pToken.Wait() && pToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) + } + p.Publish(topic, 0, false, "p0s1 payload 1") + p.Publish(topic, 0, false, "p0s1 payload 2") + + wait(choke) + wait(choke) + + p.Publish(topic, 0, false, "p0s1 payload 3") + wait(choke) + + p.Disconnect(250) + s.Disconnect(250) + + chkcond(isemptydir(store + "/p")) + chkcond(isemptydir(store + "/s")) +} + +// Pub 0, Sub 2 + +func Test_p0s2(t *testing.T) { + store := "/tmp/fvt/p0s2" + topic := "/test/p0s2" + choke := make(chan bool) + + pops := NewClientOptions() + pops.AddBroker(FVTTCP) + pops.SetClientID("p0s2-pub") + pops.SetStore(NewFileStore(store + "/p")) + p := NewClient(pops) + + sops := NewClientOptions() + sops.AddBroker(FVTTCP) + sops.SetClientID("p0s2-sub") + sops.SetStore(NewFileStore(store + "/s")) + var f MessageHandler = func(client *Client, msg Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) + choke <- true + } + sops.SetDefaultPublishHandler(f) + + s := 
NewClient(sops) + sToken := s.Connect() + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) + } + + sToken = s.Subscribe(topic, 2, nil) + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) + } + + pToken := p.Connect() + if pToken.Wait() && pToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) + } + p.Publish(topic, 0, false, "p0s2 payload 1") + p.Publish(topic, 0, false, "p0s2 payload 2") + + wait(choke) + wait(choke) + + p.Publish(topic, 0, false, "p0s2 payload 3") + + wait(choke) + + p.Disconnect(250) + s.Disconnect(250) + + chkcond(isemptydir(store + "/p")) + chkcond(isemptydir(store + "/s")) +} + +// Pub 1, Sub 0 + +func Test_p1s0(t *testing.T) { + store := "/tmp/fvt/p1s0" + topic := "/test/p1s0" + choke := make(chan bool) + + pops := NewClientOptions() + pops.AddBroker(FVTTCP) + pops.SetClientID("p1s0-pub") + pops.SetStore(NewFileStore(store + "/p")) + p := NewClient(pops) + + sops := NewClientOptions() + sops.AddBroker(FVTTCP) + sops.SetClientID("p1s0-sub") + sops.SetStore(NewFileStore(store + "/s")) + var f MessageHandler = func(client *Client, msg Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) + choke <- true + } + sops.SetDefaultPublishHandler(f) + + s := NewClient(sops) + sToken := s.Connect() + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) + } + + sToken = s.Subscribe(topic, 0, nil) + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) + } + + pToken := p.Connect() + if pToken.Wait() && pToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) + } + p.Publish(topic, 1, false, "p1s0 payload 1") + p.Publish(topic, 1, false, "p1s0 payload 2") + + wait(choke) + wait(choke) + + p.Publish(topic, 1, false, "p1s0 payload 3") + + wait(choke) + + p.Disconnect(250) + s.Disconnect(250) + + chkcond(isemptydir(store + "/p")) + chkcond(isemptydir(store + "/s")) +} + +// Pub 1, Sub 1 + +func Test_p1s1(t *testing.T) { + store := "/tmp/fvt/p1s1" + topic := "/test/p1s1" + choke := make(chan bool) + + pops := NewClientOptions() + pops.AddBroker(FVTTCP) + pops.SetClientID("p1s1-pub") + pops.SetStore(NewFileStore(store + "/p")) + p := NewClient(pops) + + sops := NewClientOptions() + sops.AddBroker(FVTTCP) + sops.SetClientID("p1s1-sub") + sops.SetStore(NewFileStore(store + "/s")) + var f MessageHandler = func(client *Client, msg Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) + choke <- true + } + sops.SetDefaultPublishHandler(f) + + s := NewClient(sops) + sToken := s.Connect() + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) + } + + sToken = s.Subscribe(topic, 1, nil) + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) + } + + pToken := p.Connect() + if pToken.Wait() && pToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) + } + p.Publish(topic, 1, false, "p1s1 payload 1") + p.Publish(topic, 1, false, "p1s1 payload 2") + + wait(choke) + wait(choke) + + p.Publish(topic, 1, false, "p1s1 payload 3") + wait(choke) + + p.Disconnect(250) + s.Disconnect(250) + + chkcond(isemptydir(store + "/p")) + chkcond(isemptydir(store + "/s")) +} + +// Pub 1, Sub 2 + +func Test_p1s2(t *testing.T) { + store := 
"/tmp/fvt/p1s2" + topic := "/test/p1s2" + choke := make(chan bool) + + pops := NewClientOptions() + pops.AddBroker(FVTTCP) + pops.SetClientID("p1s2-pub") + pops.SetStore(NewFileStore(store + "/p")) + p := NewClient(pops) + + sops := NewClientOptions() + sops.AddBroker(FVTTCP) + sops.SetClientID("p1s2-sub") + sops.SetStore(NewFileStore(store + "/s")) + var f MessageHandler = func(client *Client, msg Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) + choke <- true + } + sops.SetDefaultPublishHandler(f) + + s := NewClient(sops) + sToken := s.Connect() + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) + } + + sToken = s.Subscribe(topic, 2, nil) + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) + } + + pToken := p.Connect() + if pToken.Wait() && pToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) + } + p.Publish(topic, 1, false, "p1s2 payload 1") + p.Publish(topic, 1, false, "p1s2 payload 2") + + wait(choke) + wait(choke) + + p.Publish(topic, 1, false, "p1s2 payload 3") + + wait(choke) + + p.Disconnect(250) + s.Disconnect(250) + + chkcond(isemptydir(store + "/p")) + chkcond(isemptydir(store + "/s")) +} + +// Pub 2, Sub 0 + +func Test_p2s0(t *testing.T) { + store := "/tmp/fvt/p2s0" + topic := "/test/p2s0" + choke := make(chan bool) + + pops := NewClientOptions() + pops.AddBroker(FVTTCP) + pops.SetClientID("p2s0-pub") + pops.SetStore(NewFileStore(store + "/p")) + p := NewClient(pops) + + sops := NewClientOptions() + sops.AddBroker(FVTTCP) + sops.SetClientID("p2s0-sub") + sops.SetStore(NewFileStore(store + "/s")) + var f MessageHandler = func(client *Client, msg Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) + choke <- true + } + sops.SetDefaultPublishHandler(f) + + s := NewClient(sops) + sToken := s.Connect() + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) + } + + sToken = s.Subscribe(topic, 0, nil) + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) + } + + pToken := p.Connect() + if pToken.Wait() && pToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) + } + p.Publish(topic, 2, false, "p2s0 payload 1") + p.Publish(topic, 2, false, "p2s0 payload 2") + wait(choke) + wait(choke) + + p.Publish(topic, 2, false, "p2s0 payload 3") + wait(choke) + + p.Disconnect(250) + s.Disconnect(250) + + chkcond(isemptydir(store + "/p")) + chkcond(isemptydir(store + "/s")) +} + +// Pub 2, Sub 1 + +func Test_p2s1(t *testing.T) { + store := "/tmp/fvt/p2s1" + topic := "/test/p2s1" + choke := make(chan bool) + + pops := NewClientOptions() + pops.AddBroker(FVTTCP) + pops.SetClientID("p2s1-pub") + pops.SetStore(NewFileStore(store + "/p")) + p := NewClient(pops) + + sops := NewClientOptions() + sops.AddBroker(FVTTCP) + sops.SetClientID("p2s1-sub") + sops.SetStore(NewFileStore(store + "/s")) + var f MessageHandler = func(client *Client, msg Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) + choke <- true + } + sops.SetDefaultPublishHandler(f) + + s := NewClient(sops) + sToken := s.Connect() + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) + } + + sToken = s.Subscribe(topic, 1, nil) + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on 
Client.Subscribe(): %v", sToken.Error()) + } + + pToken := p.Connect() + if pToken.Wait() && pToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) + } + p.Publish(topic, 2, false, "p2s1 payload 1") + p.Publish(topic, 2, false, "p2s1 payload 2") + + wait(choke) + wait(choke) + + p.Publish(topic, 2, false, "p2s1 payload 3") + + wait(choke) + + p.Disconnect(250) + s.Disconnect(250) + + chkcond(isemptydir(store + "/p")) + chkcond(isemptydir(store + "/s")) +} + +// Pub 2, Sub 2 + +func Test_p2s2(t *testing.T) { + store := "/tmp/fvt/p2s2" + topic := "/test/p2s2" + choke := make(chan bool) + + pops := NewClientOptions() + pops.AddBroker(FVTTCP) + pops.SetClientID("p2s2-pub") + pops.SetStore(NewFileStore(store + "/p")) + p := NewClient(pops) + + sops := NewClientOptions() + sops.AddBroker(FVTTCP) + sops.SetClientID("p2s2-sub") + sops.SetStore(NewFileStore(store + "/s")) + var f MessageHandler = func(client *Client, msg Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) + choke <- true + } + sops.SetDefaultPublishHandler(f) + + s := NewClient(sops) + sToken := s.Connect() + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) + } + + sToken = s.Subscribe(topic, 2, nil) + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) + } + + pToken := p.Connect() + if pToken.Wait() && pToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) + } + p.Publish(topic, 2, false, "p2s2 payload 1") + p.Publish(topic, 2, false, "p2s2 payload 2") + + wait(choke) + wait(choke) + + p.Publish(topic, 2, false, "p2s2 payload 3") + + wait(choke) + + p.Disconnect(250) + s.Disconnect(250) + + chkcond(isemptydir(store + "/p")) + chkcond(isemptydir(store + "/s")) +} + +func Test_PublishMessage(t *testing.T) { + store := "/tmp/fvt/PublishMessage" + topic := "/test/pubmsg" + choke := make(chan bool) + + pops := NewClientOptions() + pops.AddBroker(FVTTCP) + pops.SetClientID("pubmsg-pub") + pops.SetStore(NewFileStore(store + "/p")) + p := NewClient(pops) + + sops := NewClientOptions() + sops.AddBroker(FVTTCP) + sops.SetClientID("pubmsg-sub") + sops.SetStore(NewFileStore(store + "/s")) + var f MessageHandler = func(client *Client, msg Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) + if string(msg.Payload()) != "pubmsg payload" { + fmt.Println("Message payload incorrect", msg.Payload(), len("pubmsg payload")) + t.Fatalf("Message payload incorrect") + } + choke <- true + } + sops.SetDefaultPublishHandler(f) + + s := NewClient(sops) + sToken := s.Connect() + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) + } + + sToken = s.Subscribe(topic, 2, nil) + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) + } + + pToken := p.Connect() + if pToken.Wait() && pToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) + } + + text := "pubmsg payload" + p.Publish(topic, 0, false, text) + p.Publish(topic, 0, false, text) + wait(choke) + wait(choke) + + p.Publish(topic, 0, false, text) + wait(choke) + + p.Disconnect(250) + s.Disconnect(250) + + chkcond(isemptydir(store + "/p")) + chkcond(isemptydir(store + "/s")) +} + +func Test_PublishEmptyMessage(t *testing.T) { + store := "/tmp/fvt/PublishEmptyMessage" + topic := "/test/pubmsgempty" + choke := make(chan bool) + + pops 
:= NewClientOptions() + pops.AddBroker(FVTTCP) + pops.SetClientID("pubmsgempty-pub") + pops.SetStore(NewFileStore(store + "/p")) + p := NewClient(pops) + + sops := NewClientOptions() + sops.AddBroker(FVTTCP) + sops.SetClientID("pubmsgempty-sub") + sops.SetStore(NewFileStore(store + "/s")) + var f MessageHandler = func(client *Client, msg Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) + if string(msg.Payload()) != "" { + t.Fatalf("Message payload incorrect") + } + choke <- true + } + sops.SetDefaultPublishHandler(f) + + s := NewClient(sops) + sToken := s.Connect() + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) + } + + sToken = s.Subscribe(topic, 2, nil) + if sToken.Wait() && sToken.Error() != nil { + t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) + } + + pToken := p.Connect() + if pToken.Wait() && pToken.Error() != nil { + t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) + } + + p.Publish(topic, 0, false, "") + p.Publish(topic, 0, false, "") + wait(choke) + wait(choke) + + p.Publish(topic, 0, false, "") + wait(choke) + + p.Disconnect(250) +} + +// func Test_Cleanstore(t *testing.T) { +// store := "/tmp/fvt/cleanstore" +// topic := "/test/cleanstore" + +// pops := NewClientOptions() +// pops.AddBroker(FVTTCP) +// pops.SetClientID("cleanstore-pub") +// pops.SetStore(NewFileStore(store + "/p")) +// p := NewClient(pops) + +// var s *Client +// sops := NewClientOptions() +// sops.AddBroker(FVTTCP) +// sops.SetClientID("cleanstore-sub") +// sops.SetCleanSession(false) +// sops.SetStore(NewFileStore(store + "/s")) +// var f MessageHandler = func(client *Client, msg Message) { +// fmt.Printf("TOPIC: %s\n", msg.Topic()) +// fmt.Printf("MSG: %s\n", msg.Payload()) +// // Close the connection after receiving +// // the first message so that hopefully +// // there is something in the store to be +// // cleaned. +// s.ForceDisconnect() +// } +// sops.SetDefaultPublishHandler(f) + +// s = NewClient(sops) +// sToken := s.Connect() +// if sToken.Wait() && sToken.Error() != nil { +// t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) +// } + +// sToken = s.Subscribe(topic, 2, nil) +// if sToken.Wait() && sToken.Error() != nil { +// t.Fatalf("Error on Client.Subscribe(): %v", sToken.Error()) +// } + +// pToken := p.Connect() +// if pToken.Wait() && pToken.Error() != nil { +// t.Fatalf("Error on Client.Connect(): %v", pToken.Error()) +// } + +// text := "test message" +// p.Publish(topic, 0, false, text) +// p.Publish(topic, 0, false, text) +// p.Publish(topic, 0, false, text) + +// p.Disconnect(250) + +// s2ops := NewClientOptions() +// s2ops.AddBroker(FVTTCP) +// s2ops.SetClientID("cleanstore-sub") +// s2ops.SetCleanSession(true) +// s2ops.SetStore(NewFileStore(store + "/s")) +// s2ops.SetDefaultPublishHandler(f) + +// s2 := NewClient(s2ops) +// sToken = s2.Connect() +// if sToken.Wait() && sToken.Error() != nil { +// t.Fatalf("Error on Client.Connect(): %v", sToken.Error()) +// } + +// // at this point existing state should be cleared... +// // how to check? 
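+// // one possible check, mirroring the other tests in this file
+// // (assumes a clean session leaves the subscriber's file store empty):
+// // s2.Disconnect(250)
+// // chkcond(isemptydir(store + "/s"))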
+// }
+
+func Test_MultipleURLs(t *testing.T) {
+	ops := NewClientOptions()
+	ops.AddBroker("tcp://127.0.0.1:10000")
+	ops.AddBroker(FVTTCP)
+	ops.SetClientID("MultiURL")
+	ops.SetStore(NewFileStore("/tmp/fvt/MultiURL"))
+
+	c := NewClient(ops)
+	token := c.Connect()
+	if token.Wait() && token.Error() != nil {
+		t.Fatalf("Error on Client.Connect(): %v", token.Error())
+	}
+
+	token = c.Publish("/test/MultiURL", 0, false, "Publish qos0")
+	token.Wait()
+
+	c.Disconnect(250)
+}
+
+/*
+// A test to make sure the ping mechanism is working
+// This test can be left commented out because it's annoying to wait for
+func Test_ping3_idle10(t *testing.T) {
+	ops := NewClientOptions()
+	ops.AddBroker(FVTTCP)
+	//ops.AddBroker("tcp://test.mosquitto.org:1883")
+	ops.SetClientID("p3i10")
+	ops.SetKeepAlive(4 * time.Second)
+
+	c := NewClient(ops)
+	token := c.Connect()
+	if token.Wait() && token.Error() != nil {
+		t.Fatalf("Error on Client.Connect(): %v", token.Error())
+	}
+	time.Sleep(time.Duration(10) * time.Second)
+	c.Disconnect(250)
+}
+*/
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_store_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_store_test.go
new file mode 100644
index 000000000..d74490b54
--- /dev/null
+++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/fvt_store_test.go
@@ -0,0 +1,496 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"bytes"
+	"fmt"
+	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
+	"testing"
+)
+
+/*******************************
+ **** Some helper functions ****
+ *******************************/
+
+func b2s(bs []byte) string {
+	s := ""
+	for _, b := range bs {
+		s += fmt.Sprintf("%x ", b)
+	}
+	return s
+}
+
+/**********************************************
+ **** A mock store implementation for test ****
+ **********************************************/
+
+type TestStore struct {
+	mput []uint16
+	mget []uint16
+	mdel []uint16
+}
+
+func (ts *TestStore) Open() {
+}
+
+func (ts *TestStore) Close() {
+}
+
+func (ts *TestStore) Put(key string, m packets.ControlPacket) {
+	ts.mput = append(ts.mput, m.Details().MessageID)
+}
+
+func (ts *TestStore) Get(key string) packets.ControlPacket {
+	mid := mIDFromKey(key)
+	ts.mget = append(ts.mget, mid)
+	return nil
+}
+
+func (ts *TestStore) All() []string {
+	return nil
+}
+
+func (ts *TestStore) Del(key string) {
+	mid := mIDFromKey(key)
+	ts.mdel = append(ts.mdel, mid)
+}
+
+func (ts *TestStore) Reset() {
+}
+
+/*******************
+ **** FileStore ****
+ *******************/
+
+func Test_NewFileStore(t *testing.T) {
+	storedir := "/tmp/TestStore/_new"
+	f := NewFileStore(storedir)
+	if f.opened {
+		t.Fatalf("filestore was opened without opening it")
+	}
+	if f.directory != storedir {
+		t.Fatalf("filestore directory is wrong")
+	}
+	// storedir might exist or might not, just like with a real client
+	// the point is, we don't care, we just want it to exist after it is
+	// opened
+}
+
+func Test_FileStore_Open(t *testing.T) {
+	storedir := "/tmp/TestStore/_open"
+
+	f := NewFileStore(storedir)
+	f.Open()
+	if !f.opened {
t.Fatalf("filestore was not set open") + } + if f.directory != storedir { + t.Fatalf("filestore directory is wrong") + } + if !exists(storedir) { + t.Fatalf("filestore directory does not exst after opening it") + } +} + +func Test_FileStore_Close(t *testing.T) { + storedir := "/tmp/TestStore/_unopen" + f := NewFileStore(storedir) + f.Open() + if !f.opened { + t.Fatalf("filestore was not set open") + } + if f.directory != storedir { + t.Fatalf("filestore directory is wrong") + } + if !exists(storedir) { + t.Fatalf("filestore directory does not exst after opening it") + } + + f.Close() + if f.opened { + t.Fatalf("filestore was still open after unopen") + } + if !exists(storedir) { + t.Fatalf("filestore was deleted after unopen") + } +} + +func Test_FileStore_write(t *testing.T) { + storedir := "/tmp/TestStore/_write" + f := NewFileStore(storedir) + f.Open() + + pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pm.Qos = 1 + pm.TopicName = "a/b/c" + pm.Payload = []byte{0xBE, 0xEF, 0xED} + pm.MessageID = 91 + + key := inboundKeyFromMID(pm.MessageID) + f.Put(key, pm) + + if !exists(storedir + "/i.91.msg") { + t.Fatalf("message not in store") + } + +} + +func Test_FileStore_Get(t *testing.T) { + storedir := "/tmp/TestStore/_get" + f := NewFileStore(storedir) + f.Open() + pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pm.Qos = 1 + pm.TopicName = "/a/b/c" + pm.Payload = []byte{0xBE, 0xEF, 0xED} + pm.MessageID = 120 + + key := outboundKeyFromMID(pm.MessageID) + f.Put(key, pm) + + if !exists(storedir + "/o.120.msg") { + t.Fatalf("message not in store") + } + + exp := []byte{ + /* msg type */ + 0x32, // qos 1 + + /* remlen */ + 0x0d, + + /* topic, msg id in varheader */ + 0x00, // length of topic + 0x06, + 0x2F, // / + 0x61, // a + 0x2F, // / + 0x62, // b + 0x2F, // / + 0x63, // c + + /* msg id (is always 2 bytes) */ + 0x00, + 0x78, + + /*payload */ + 0xBE, + 0xEF, + 0xED, + } + + m := f.Get(key) + + if m == nil { + t.Fatalf("message not retreived from store") + } + + var msg bytes.Buffer + m.Write(&msg) + if !bytes.Equal(exp, msg.Bytes()) { + t.Fatal("message from store not same as what went in", msg.Bytes()) + } +} + +func Test_FileStore_All(t *testing.T) { + storedir := "/tmp/TestStore/_all" + f := NewFileStore(storedir) + f.Open() + pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pm.Qos = 2 + pm.TopicName = "/t/r/v" + pm.Payload = []byte{0x01, 0x02} + pm.MessageID = 121 + + key := outboundKeyFromMID(pm.MessageID) + f.Put(key, pm) + + keys := f.All() + if len(keys) != 1 { + t.Fatalf("FileStore.All does not have the messages") + } + + if keys[0] != "o.121" { + t.Fatalf("FileStore.All has wrong key") + } +} + +func Test_FileStore_Del(t *testing.T) { + storedir := "/tmp/TestStore/_del" + f := NewFileStore(storedir) + f.Open() + + pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pm.Qos = 1 + pm.TopicName = "a/b/c" + pm.Payload = []byte{0xBE, 0xEF, 0xED} + pm.MessageID = 17 + + key := inboundKeyFromMID(pm.MessageID) + f.Put(key, pm) + + if !exists(storedir + "/i.17.msg") { + t.Fatalf("message not in store") + } + + f.Del(key) + + if exists(storedir + "/i.17.msg") { + t.Fatalf("message still exists after deletion") + } +} + +func Test_FileStore_Reset(t *testing.T) { + storedir := "/tmp/TestStore/_reset" + f := NewFileStore(storedir) + f.Open() + + pm1 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pm1.Qos = 1 + pm1.TopicName = "/q/w/e" + pm1.Payload = []byte{0xBB} + 
pm1.MessageID = 71 + key1 := inboundKeyFromMID(pm1.MessageID) + f.Put(key1, pm1) + + pm2 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pm2.Qos = 1 + pm2.TopicName = "/q/w/e" + pm2.Payload = []byte{0xBB} + pm2.MessageID = 72 + key2 := inboundKeyFromMID(pm2.MessageID) + f.Put(key2, pm2) + + pm3 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pm3.Qos = 1 + pm3.TopicName = "/q/w/e" + pm3.Payload = []byte{0xBB} + pm3.MessageID = 73 + key3 := inboundKeyFromMID(pm3.MessageID) + f.Put(key3, pm3) + + pm4 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pm4.Qos = 1 + pm4.TopicName = "/q/w/e" + pm4.Payload = []byte{0xBB} + pm4.MessageID = 74 + key4 := inboundKeyFromMID(pm4.MessageID) + f.Put(key4, pm4) + + pm5 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pm5.Qos = 1 + pm5.TopicName = "/q/w/e" + pm5.Payload = []byte{0xBB} + pm5.MessageID = 75 + key5 := inboundKeyFromMID(pm5.MessageID) + f.Put(key5, pm5) + + if !exists(storedir + "/i.71.msg") { + t.Fatalf("message not in store") + } + + if !exists(storedir + "/i.72.msg") { + t.Fatalf("message not in store") + } + + if !exists(storedir + "/i.73.msg") { + t.Fatalf("message not in store") + } + + if !exists(storedir + "/i.74.msg") { + t.Fatalf("message not in store") + } + + if !exists(storedir + "/i.75.msg") { + t.Fatalf("message not in store") + } + + f.Reset() + + if exists(storedir + "/i.71.msg") { + t.Fatalf("message still exists after reset") + } + + if exists(storedir + "/i.72.msg") { + t.Fatalf("message still exists after reset") + } + + if exists(storedir + "/i.73.msg") { + t.Fatalf("message still exists after reset") + } + + if exists(storedir + "/i.74.msg") { + t.Fatalf("message still exists after reset") + } + + if exists(storedir + "/i.75.msg") { + t.Fatalf("message still exists after reset") + } +} + +/******************* + *** MemoryStore *** + *******************/ + +func Test_NewMemoryStore(t *testing.T) { + m := NewMemoryStore() + if m == nil { + t.Fatalf("MemoryStore could not be created") + } +} + +func Test_MemoryStore_Open(t *testing.T) { + m := NewMemoryStore() + m.Open() + if !m.opened { + t.Fatalf("MemoryStore was not set open") + } +} + +func Test_MemoryStore_Close(t *testing.T) { + m := NewMemoryStore() + m.Open() + if !m.opened { + t.Fatalf("MemoryStore was not set open") + } + + m.Close() + if m.opened { + t.Fatalf("MemoryStore was still open after unopen") + } +} + +func Test_MemoryStore_Reset(t *testing.T) { + m := NewMemoryStore() + m.Open() + + pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pm.Qos = 2 + pm.TopicName = "/f/r/s" + pm.Payload = []byte{0xAB} + pm.MessageID = 81 + + key := outboundKeyFromMID(pm.MessageID) + m.Put(key, pm) + + if len(m.messages) != 1 { + t.Fatalf("message not in memstore") + } + + m.Reset() + + if len(m.messages) != 0 { + t.Fatalf("reset did not clear memstore") + } +} + +func Test_MemoryStore_write(t *testing.T) { + m := NewMemoryStore() + m.Open() + + pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pm.Qos = 1 + pm.TopicName = "/a/b/c" + pm.Payload = []byte{0xBE, 0xEF, 0xED} + pm.MessageID = 91 + key := inboundKeyFromMID(pm.MessageID) + m.Put(key, pm) + + if len(m.messages) != 1 { + t.Fatalf("message not in store") + } +} + +func Test_MemoryStore_Get(t *testing.T) { + m := NewMemoryStore() + m.Open() + pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pm.Qos = 1 + pm.TopicName = "/a/b/c" + pm.Payload = []byte{0xBE, 
0xEF, 0xED}
+	pm.MessageID = 120
+
+	key := outboundKeyFromMID(pm.MessageID)
+	m.Put(key, pm)
+
+	if len(m.messages) != 1 {
+		t.Fatalf("message not in store")
+	}
+
+	exp := []byte{
+		/* msg type */
+		0x32, // qos 1
+
+		/* remlen */
+		0x0d,
+
+		/* topic, msg id in varheader */
+		0x00, // length of topic
+		0x06,
+		0x2F, // /
+		0x61, // a
+		0x2F, // /
+		0x62, // b
+		0x2F, // /
+		0x63, // c
+
+		/* msg id (is always 2 bytes) */
+		0x00,
+		0x78,
+
+		/* payload */
+		0xBE,
+		0xEF,
+		0xED,
+	}
+
+	msg := m.Get(key)
+
+	if msg == nil {
+		t.Fatalf("message not retrieved from store")
+	}
+
+	var buf bytes.Buffer
+	msg.Write(&buf)
+	if !bytes.Equal(exp, buf.Bytes()) {
+		t.Fatalf("message from store not same as what went in")
+	}
+}
+
+func Test_MemoryStore_Del(t *testing.T) {
+	m := NewMemoryStore()
+	m.Open()
+
+	pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
+	pm.Qos = 1
+	pm.TopicName = "/a/b/c"
+	pm.Payload = []byte{0xBE, 0xEF, 0xED}
+	pm.MessageID = 17
+
+	key := outboundKeyFromMID(pm.MessageID)
+
+	m.Put(key, pm)
+
+	if len(m.messages) != 1 {
+		t.Fatalf("message not in store")
+	}
+
+	m.Del(key)
+
+	if len(m.messages) != 0 {
+		t.Fatalf("message still exists after deletion")
+	}
+}
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/memstore.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/memstore.go
new file mode 100644
index 000000000..690a2cca5
--- /dev/null
+++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/memstore.go
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
+	"sync"
+)
+
+// MemoryStore implements the store interface to provide a "persistence"
+// mechanism wholly stored in memory. This is only useful for
+// as long as the client instance exists.
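+// Put, Get, All, Del, Close and Reset panic (via chkcond) if the store
+// has not been opened.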
+type MemoryStore struct {
+	sync.RWMutex
+	messages map[string]packets.ControlPacket
+	opened   bool
+}
+
+// NewMemoryStore returns a pointer to a new instance of
+// MemoryStore. The instance is not initialized, and is not ready to
+// use until Open() has been called on it.
+func NewMemoryStore() *MemoryStore {
+	store := &MemoryStore{
+		messages: make(map[string]packets.ControlPacket),
+		opened:   false,
+	}
+	return store
+}
+
+// Open initializes a MemoryStore instance.
+func (store *MemoryStore) Open() {
+	store.Lock()
+	defer store.Unlock()
+	store.opened = true
+	DEBUG.Println(STR, "memorystore initialized")
+}
+
+// Put takes a key and a ControlPacket and stores the
+// packet.
+func (store *MemoryStore) Put(key string, message packets.ControlPacket) {
+	store.Lock()
+	defer store.Unlock()
+	chkcond(store.opened)
+	store.messages[key] = message
+}
+
+// Get takes a key and looks in the store for a matching packet,
+// returning either the packet or nil.
+func (store *MemoryStore) Get(key string) packets.ControlPacket {
+	store.RLock()
+	defer store.RUnlock()
+	chkcond(store.opened)
+	mid := mIDFromKey(key)
+	m := store.messages[key]
+	if m == nil {
+		CRITICAL.Println(STR, "memorystore get: message", mid, "not found")
+	} else {
+		DEBUG.Println(STR, "memorystore get: message", mid, "found")
+	}
+	return m
+}
+
+// All returns a slice of strings containing all the keys currently
+// in the MemoryStore.
+func (store *MemoryStore) All() []string {
+	store.RLock()
+	defer store.RUnlock()
+	chkcond(store.opened)
+	keys := []string{}
+	for k := range store.messages {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// Del takes a key, searches the MemoryStore and if the key is found
+// deletes the packet associated with it.
+func (store *MemoryStore) Del(key string) {
+	store.Lock()
+	defer store.Unlock()
+	chkcond(store.opened)
+	mid := mIDFromKey(key)
+	if _, ok := store.messages[key]; !ok {
+		WARN.Println(STR, "memorystore del: message", mid, "not found")
+	} else {
+		delete(store.messages, key)
+		DEBUG.Println(STR, "memorystore del: message", mid, "was deleted")
+	}
+}
+
+// Close will disallow modifications to the state of the store.
+func (store *MemoryStore) Close() {
+	store.Lock()
+	defer store.Unlock()
+	chkcond(store.opened)
+	store.opened = false
+	DEBUG.Println(STR, "memorystore closed")
+}
+
+// Reset eliminates all persisted message data in the store.
+func (store *MemoryStore) Reset() {
+	store.Lock()
+	defer store.Unlock()
+	chkcond(store.opened)
+	store.messages = make(map[string]packets.ControlPacket)
+	WARN.Println(STR, "memorystore wiped")
+}
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/message.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/message.go
new file mode 100644
index 000000000..0f53a1701
--- /dev/null
+++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/message.go
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
+)
+
+// Message defines the externals that a message implementation must support.
+// These are received messages that are passed to the callbacks, not internal
+// messages.
+type Message interface {
+	Duplicate() bool
+	Qos() byte
+	Retained() bool
+	Topic() string
+	MessageID() uint16
+	Payload() []byte
+}
+
+type message struct {
+	duplicate bool
+	qos       byte
+	retained  bool
+	topic     string
+	messageID uint16
+	payload   []byte
+}
+
+func (m *message) Duplicate() bool {
+	return m.duplicate
+}
+
+func (m *message) Qos() byte {
+	return m.qos
+}
+
+func (m *message) Retained() bool {
+	return m.retained
+}
+
+func (m *message) Topic() string {
+	return m.topic
+}
+
+func (m *message) MessageID() uint16 {
+	return m.messageID
+}
+
+func (m *message) Payload() []byte {
+	return m.payload
+}
+
+func messageFromPublish(p *packets.PublishPacket) Message {
+	return &message{
+		duplicate: p.Dup,
+		qos:       p.Qos,
+		retained:  p.Retain,
+		topic:     p.TopicName,
+		messageID: p.MessageID,
+		payload:   p.Payload,
+	}
+}
+
+func newConnectMsgFromOptions(options *ClientOptions) *packets.ConnectPacket {
+	m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
+
+	m.CleanSession = options.CleanSession
+	m.WillFlag = options.WillEnabled
+	m.WillRetain = options.WillRetained
+	m.ClientIdentifier = options.ClientID
+
+	if options.WillEnabled {
+		m.WillQos = options.WillQos
+		m.WillTopic = options.WillTopic
+		m.WillMessage = options.WillPayload
+	}
+
+	if options.Username != "" {
+		m.UsernameFlag = true
+		m.Username = options.Username
+		// mustn't set a password without a username
+		if options.Password != "" {
+			m.PasswordFlag = true
+			m.Password = []byte(options.Password)
+		}
+	}
+
+	m.KeepaliveTimer = uint16(options.KeepAlive)
+
+	return m
+}
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/messageids.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/messageids.go
new file mode 100644
index 000000000..a6fc3ae4b
--- /dev/null
+++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/messageids.go
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"sync"
+)
+
+// MId is a 16-bit message id, as specified by the MQTT spec.
+// In general, these values should not be depended upon by
+// the client application.
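+// Ids are issued from the range [midMin, midMax); getID returns 0 when
+// no id is free.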
+type MId uint16 + +type messageIds struct { + sync.RWMutex + index map[uint16]Token +} + +const ( + midMin uint16 = 1 + midMax uint16 = 65535 +) + +func (mids *messageIds) freeID(id uint16) { + mids.Lock() + defer mids.Unlock() + delete(mids.index, id) +} + +func (mids *messageIds) getID(t Token) uint16 { + mids.Lock() + defer mids.Unlock() + for i := midMin; i < midMax; i++ { + if _, ok := mids.index[i]; !ok { + mids.index[i] = t + return i + } + } + return 0 +} + +func (mids *messageIds) getToken(id uint16) Token { + mids.RLock() + defer mids.RUnlock() + if token, ok := mids.index[id]; ok { + return token + } + return nil +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net.go new file mode 100644 index 000000000..daee86820 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net.go @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "crypto/tls" + "errors" + "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" + "golang.org/x/net/websocket" + "net" + "net/url" + "reflect" + "time" +) + +func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration) (net.Conn, error) { + switch uri.Scheme { + case "ws": + conn, err := websocket.Dial(uri.String(), "mqtt", "ws://localhost") + if err != nil { + return nil, err + } + conn.PayloadType = websocket.BinaryFrame + return conn, err + case "wss": + config, _ := websocket.NewConfig(uri.String(), "ws://localhost") + config.Protocol = []string{"mqtt"} + config.TlsConfig = tlsc + conn, err := websocket.DialConfig(config) + if err != nil { + return nil, err + } + conn.PayloadType = websocket.BinaryFrame + return conn, err + case "tcp": + conn, err := net.DialTimeout("tcp", uri.Host, timeout) + if err != nil { + return nil, err + } + return conn, nil + case "ssl": + fallthrough + case "tls": + fallthrough + case "tcps": + conn, err := tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", uri.Host, tlsc) + if err != nil { + return nil, err + } + return conn, nil + } + return nil, errors.New("Unknown protocol") +} + +// actually read incoming messages off the wire +// send Message object into ibound channel +func incoming(c *Client) { + defer c.workers.Done() + var err error + var cp packets.ControlPacket + + DEBUG.Println(NET, "incoming started") + + for { + if cp, err = packets.ReadPacket(c.conn); err != nil { + break + } + DEBUG.Println(NET, "Received Message") + c.ibound <- cp + } + // We received an error on read. 
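+	// (a clean Disconnect also lands here, since closing the network
+	// connection is what unblocks packets.ReadPacket)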
+ // If disconnect is in progress, swallow error and return + select { + case <-c.stop: + DEBUG.Println(NET, "incoming stopped") + return + // Not trying to disconnect, send the error to the errors channel + default: + ERROR.Println(NET, "incoming stopped with error") + c.errors <- err + return + } +} + +// receive a Message object on obound, and then +// actually send outgoing message to the wire +func outgoing(c *Client) { + defer c.workers.Done() + DEBUG.Println(NET, "outgoing started") + + for { + DEBUG.Println(NET, "outgoing waiting for an outbound message") + select { + case <-c.stop: + DEBUG.Println(NET, "outgoing stopped") + return + case pub := <-c.obound: + msg := pub.p.(*packets.PublishPacket) + if msg.Qos != 0 && msg.MessageID == 0 { + msg.MessageID = c.getID(pub.t) + pub.t.(*PublishToken).messageID = msg.MessageID + } + //persist_obound(c.persist, msg) + + if c.options.WriteTimeout > 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.options.WriteTimeout)) + } + + if err := msg.Write(c.conn); err != nil { + ERROR.Println(NET, "outgoing stopped with error") + c.errors <- err + return + } + + if c.options.WriteTimeout > 0 { + // If we successfully wrote, we don't want the timeout to happen during an idle period + // so we reset it to infinite. + c.conn.SetWriteDeadline(time.Time{}) + } + + if msg.Qos == 0 { + pub.t.flowComplete() + } + + c.lastContact.update() + DEBUG.Println(NET, "obound wrote msg, id:", msg.MessageID) + case msg := <-c.oboundP: + switch msg.p.(type) { + case *packets.SubscribePacket: + msg.p.(*packets.SubscribePacket).MessageID = c.getID(msg.t) + case *packets.UnsubscribePacket: + msg.p.(*packets.UnsubscribePacket).MessageID = c.getID(msg.t) + } + DEBUG.Println(NET, "obound priority msg to write, type", reflect.TypeOf(msg.p)) + if err := msg.p.Write(c.conn); err != nil { + ERROR.Println(NET, "outgoing stopped with error") + c.errors <- err + return + } + c.lastContact.update() + switch msg.p.(type) { + case *packets.DisconnectPacket: + msg.t.(*DisconnectToken).flowComplete() + DEBUG.Println(NET, "outbound wrote disconnect, stopping") + return + } + } + } +} + +// receive Message objects on ibound +// store messages if necessary +// send replies on obound +// delete messages from store if necessary +func alllogic(c *Client) { + + DEBUG.Println(NET, "logic started") + + for { + DEBUG.Println(NET, "logic waiting for msg on ibound") + + select { + case msg := <-c.ibound: + DEBUG.Println(NET, "logic got msg on ibound") + //persist_ibound(c.persist, msg) + switch msg.(type) { + case *packets.PingrespPacket: + DEBUG.Println(NET, "received pingresp") + c.pingOutstanding = false + case *packets.SubackPacket: + sa := msg.(*packets.SubackPacket) + DEBUG.Println(NET, "received suback, id:", sa.MessageID) + token := c.getToken(sa.MessageID).(*SubscribeToken) + DEBUG.Println(NET, "granted qoss", sa.GrantedQoss) + for i, qos := range sa.GrantedQoss { + token.subResult[token.subs[i]] = qos + } + token.flowComplete() + go c.freeID(sa.MessageID) + case *packets.UnsubackPacket: + ua := msg.(*packets.UnsubackPacket) + DEBUG.Println(NET, "received unsuback, id:", ua.MessageID) + token := c.getToken(ua.MessageID).(*UnsubscribeToken) + token.flowComplete() + go c.freeID(ua.MessageID) + case *packets.PublishPacket: + pp := msg.(*packets.PublishPacket) + DEBUG.Println(NET, "received publish, msgId:", pp.MessageID) + DEBUG.Println(NET, "putting msg on onPubChan") + switch pp.Qos { + case 2: + c.incomingPubChan <- pp + DEBUG.Println(NET, "done putting msg on incomingPubChan") + pr := 
packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
+					pr.MessageID = pp.MessageID
+					DEBUG.Println(NET, "putting pubrec msg on obound")
+					c.oboundP <- &PacketAndToken{p: pr, t: nil}
+					DEBUG.Println(NET, "done putting pubrec msg on obound")
+				case 1:
+					c.incomingPubChan <- pp
+					DEBUG.Println(NET, "done putting msg on incomingPubChan")
+					pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
+					pa.MessageID = pp.MessageID
+					DEBUG.Println(NET, "putting puback msg on obound")
+					c.oboundP <- &PacketAndToken{p: pa, t: nil}
+					DEBUG.Println(NET, "done putting puback msg on obound")
+				case 0:
+					select {
+					case c.incomingPubChan <- pp:
+						DEBUG.Println(NET, "done putting msg on incomingPubChan")
+					case err, ok := <-c.errors:
+						DEBUG.Println(NET, "error while putting msg on pubChanZero")
+						// We are unblocked, but need to put the error back on so the outer
+						// select can handle it appropriately.
+						if ok {
+							go func(errVal error, errChan chan error) {
+								errChan <- errVal
+							}(err, c.errors)
+						}
+					}
+				}
+			case *packets.PubackPacket:
+				pa := msg.(*packets.PubackPacket)
+				DEBUG.Println(NET, "received puback, id:", pa.MessageID)
+				// c.receipts.get(msg.MsgId()) <- Receipt{}
+				// c.receipts.end(msg.MsgId())
+				c.getToken(pa.MessageID).flowComplete()
+				c.freeID(pa.MessageID)
+			case *packets.PubrecPacket:
+				prec := msg.(*packets.PubrecPacket)
+				DEBUG.Println(NET, "received pubrec, id:", prec.MessageID)
+				prel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
+				prel.MessageID = prec.MessageID
+				select {
+				case c.oboundP <- &PacketAndToken{p: prel, t: nil}:
+				case <-time.After(time.Second):
+				}
+			case *packets.PubrelPacket:
+				pr := msg.(*packets.PubrelPacket)
+				DEBUG.Println(NET, "received pubrel, id:", pr.MessageID)
+				pc := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket)
+				pc.MessageID = pr.MessageID
+				select {
+				case c.oboundP <- &PacketAndToken{p: pc, t: nil}:
+				case <-time.After(time.Second):
+				}
+			case *packets.PubcompPacket:
+				pc := msg.(*packets.PubcompPacket)
+				DEBUG.Println(NET, "received pubcomp, id:", pc.MessageID)
+				c.getToken(pc.MessageID).flowComplete()
+				c.freeID(pc.MessageID)
+			}
+		case <-c.stop:
+			WARN.Println(NET, "logic stopped")
+			return
+		case err := <-c.errors:
+			ERROR.Println(NET, "logic got error")
+			c.internalConnLost(err)
+			return
+		}
+		c.lastContact.update()
+	}
+}
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net_test.go
new file mode 100644
index 000000000..9598bd6b3
--- /dev/null
+++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net_test.go
@@ -0,0 +1,17 @@
+package mqtt
+
+import (
+	"net/url"
+	"testing"
+	"time"
+)
+
+// openConnection should reject broker URLs with an unknown scheme.
+func Test_openConnection(t *testing.T) {
+	uri, err := url.Parse("bogus://localhost:1883")
+	if err != nil {
+		t.Fatalf("url.Parse: %v", err)
+	}
+	if _, err := openConnection(uri, nil, time.Second); err == nil {
+		t.Errorf("expected an error for an unknown scheme, got nil")
+	}
+}
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/notice.html b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/notice.html
new file mode 100644
index 000000000..f19c483b9
--- /dev/null
+++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/notice.html
@@ -0,0 +1,108 @@

+Eclipse Foundation Software User Agreement
+February 1, 2011
+
+[Standard Eclipse Foundation Software User Agreement boilerplate, rendered
+as HTML in the original file. Sections: Usage Of Content; Applicable
+Licenses (the Eclipse Public License Version 1.0,
+http://www.eclipse.org/legal/epl-v10.html); Use of Provisioning Technology;
+Cryptography. Java and all Java-based trademarks are trademarks of Oracle
+Corporation in the United States, other countries, or both.]
+
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/oops.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/oops.go
new file mode 100644
index 000000000..f15a9bae1
--- /dev/null
+++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/oops.go
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+func chkerr(e error) {
+	if e != nil {
+		panic(e)
+	}
+}
+
+func chkcond(b bool) {
+	if !b {
+		panic("oops")
+	}
+}
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/options.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/options.go
new file mode 100644
index 000000000..156b7c0e5
--- /dev/null
+++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/options.go
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2013 IBM Corp.
+ *
+ * All rights reserved. This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package mqtt
+
+import (
+	"crypto/tls"
+	"net/url"
+	"time"
+)
+
+// MessageHandler is a callback type which can be set to be
+// executed upon the arrival of messages published to topics
+// to which the client is subscribed.
+type MessageHandler func(*Client, Message)
+
+// ConnectionLostHandler is a callback type which can be set to be
+// executed upon an unintended disconnection from the MQTT broker.
+// Disconnects caused by calling Disconnect or ForceDisconnect will
+// not cause an OnConnectionLost callback to execute.
+type ConnectionLostHandler func(*Client, error)
+
+// OnConnectHandler is a callback that is called when the client
+// state changes from unconnected/disconnected to connected, both
+// at initial connection and on reconnection.
+type OnConnectHandler func(*Client)
+
+// ClientOptions contains configurable options for a Client.
+type ClientOptions struct {
+	Servers                 []*url.URL
+	ClientID                string
+	Username                string
+	Password                string
+	CleanSession            bool
+	Order                   bool
+	WillEnabled             bool
+	WillTopic               string
+	WillPayload             []byte
+	WillQos                 byte
+	WillRetained            bool
+	ProtocolVersion         uint
+	protocolVersionExplicit bool
+	TLSConfig               tls.Config
+	KeepAlive               time.Duration
+	ConnectTimeout          time.Duration
+	MaxReconnectInterval    time.Duration
+	AutoReconnect           bool
+	Store                   Store
+	DefaultPublishHander    MessageHandler
+	OnConnect               OnConnectHandler
+	OnConnectionLost        ConnectionLostHandler
+	WriteTimeout            time.Duration
+}
+
+// NewClientOptions will create a new ClientOptions type with some
+// default values.
+// Port: 1883 +// CleanSession: True +// Order: True +// KeepAlive: 30 (seconds) +// ConnectTimeout: 30 (seconds) +// MaxReconnectInterval 10 (minutes) +// AutoReconnect: True +func NewClientOptions() *ClientOptions { + o := &ClientOptions{ + Servers: nil, + ClientID: "", + Username: "", + Password: "", + CleanSession: true, + Order: true, + WillEnabled: false, + WillTopic: "", + WillPayload: nil, + WillQos: 0, + WillRetained: false, + ProtocolVersion: 0, + protocolVersionExplicit: false, + TLSConfig: tls.Config{}, + KeepAlive: 30 * time.Second, + ConnectTimeout: 30 * time.Second, + MaxReconnectInterval: 10 * time.Minute, + AutoReconnect: true, + Store: nil, + OnConnect: nil, + OnConnectionLost: DefaultConnectionLostHandler, + WriteTimeout: 0, // 0 represents timeout disabled + } + return o +} + +// AddBroker adds a broker URI to the list of brokers to be used. The format should be +// scheme://host:port +// Where "scheme" is one of "tcp", "ssl", or "ws", "host" is the ip-address (or hostname) +// and "port" is the port on which the broker is accepting connections. +func (o *ClientOptions) AddBroker(server string) *ClientOptions { + brokerURI, _ := url.Parse(server) + o.Servers = append(o.Servers, brokerURI) + return o +} + +// SetClientID will set the client id to be used by this client when +// connecting to the MQTT broker. According to the MQTT v3.1 specification, +// a client id mus be no longer than 23 characters. +func (o *ClientOptions) SetClientID(id string) *ClientOptions { + o.ClientID = id + return o +} + +// SetUsername will set the username to be used by this client when connecting +// to the MQTT broker. Note: without the use of SSL/TLS, this information will +// be sent in plaintext accross the wire. +func (o *ClientOptions) SetUsername(u string) *ClientOptions { + o.Username = u + return o +} + +// SetPassword will set the password to be used by this client when connecting +// to the MQTT broker. Note: without the use of SSL/TLS, this information will +// be sent in plaintext accross the wire. +func (o *ClientOptions) SetPassword(p string) *ClientOptions { + o.Password = p + return o +} + +// SetCleanSession will set the "clean session" flag in the connect message +// when this client connects to an MQTT broker. By setting this flag, you are +// indicating that no messages saved by the broker for this client should be +// delivered. Any messages that were going to be sent by this client before +// diconnecting previously but didn't will not be sent upon connecting to the +// broker. +func (o *ClientOptions) SetCleanSession(clean bool) *ClientOptions { + o.CleanSession = clean + return o +} + +// SetOrderMatters will set the message routing to guarantee order within +// each QoS level. By default, this value is true. If set to false, +// this flag indicates that messages can be delivered asynchronously +// from the client to the application and possibly arrive out of order. +func (o *ClientOptions) SetOrderMatters(order bool) *ClientOptions { + o.Order = order + return o +} + +// SetTLSConfig will set an SSL/TLS configuration to be used when connecting +// to an MQTT broker. Please read the official Go documentation for more +// information. +func (o *ClientOptions) SetTLSConfig(t *tls.Config) *ClientOptions { + o.TLSConfig = *t + return o +} + +// SetStore will set the implementation of the Store interface +// used to provide message persistence in cases where QoS levels +// QoS_ONE or QoS_TWO are used. 
If no store is provided, then the +// client will use MemoryStore by default. +func (o *ClientOptions) SetStore(s Store) *ClientOptions { + o.Store = s + return o +} + +// SetKeepAlive will set the amount of time (in seconds) that the client +// should wait before sending a PING request to the broker. This will +// allow the client to know that a connection has not been lost with the +// server. +func (o *ClientOptions) SetKeepAlive(k time.Duration) *ClientOptions { + o.KeepAlive = k + return o +} + +// SetProtocolVersion sets the MQTT version to be used to connect to the +// broker. Legitimate values are currently 3 - MQTT 3.1 or 4 - MQTT 3.1.1 +func (o *ClientOptions) SetProtocolVersion(pv uint) *ClientOptions { + if pv >= 3 && pv <= 4 { + o.ProtocolVersion = pv + o.protocolVersionExplicit = true + } + return o +} + +// UnsetWill will cause any set will message to be disregarded. +func (o *ClientOptions) UnsetWill() *ClientOptions { + o.WillEnabled = false + return o +} + +// SetWill accepts a string will message to be set. When the client connects, +// it will give this will message to the broker, which will then publish the +// provided payload (the will) to any clients that are subscribed to the provided +// topic. +func (o *ClientOptions) SetWill(topic string, payload string, qos byte, retained bool) *ClientOptions { + o.SetBinaryWill(topic, []byte(payload), qos, retained) + return o +} + +// SetBinaryWill accepts a []byte will message to be set. When the client connects, +// it will give this will message to the broker, which will then publish the +// provided payload (the will) to any clients that are subscribed to the provided +// topic. +func (o *ClientOptions) SetBinaryWill(topic string, payload []byte, qos byte, retained bool) *ClientOptions { + o.WillEnabled = true + o.WillTopic = topic + o.WillPayload = payload + o.WillQos = qos + o.WillRetained = retained + return o +} + +// SetDefaultPublishHandler sets the MessageHandler that will be called when a message +// is received that does not match any known subscriptions. +func (o *ClientOptions) SetDefaultPublishHandler(defaultHandler MessageHandler) *ClientOptions { + o.DefaultPublishHander = defaultHandler + return o +} + +// SetOnConnectHandler sets the function to be called when the client is connected. Both +// at initial connection time and upon automatic reconnect. +func (o *ClientOptions) SetOnConnectHandler(onConn OnConnectHandler) *ClientOptions { + o.OnConnect = onConn + return o +} + +// SetConnectionLostHandler will set the OnConnectionLost callback to be executed +// in the case where the client unexpectedly loses connection with the MQTT broker. +func (o *ClientOptions) SetConnectionLostHandler(onLost ConnectionLostHandler) *ClientOptions { + o.OnConnectionLost = onLost + return o +} + +// SetWriteTimeout puts a limit on how long a mqtt publish should block until it unblocks with a +// timeout error. A duration of 0 never times out. Default 30 seconds +func (o *ClientOptions) SetWriteTimeout(t time.Duration) *ClientOptions { + o.WriteTimeout = t + return o +} + +// SetConnectTimeout limits how long the client will wait when trying to open a connection +// to an MQTT server before timeing out and erroring the attempt. A duration of 0 never times out. +// Default 30 seconds. Currently only operational on TCP/TLS connections. 
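// An editorial usage sketch, not part of the vendored file: every setter in
// this file returns *ClientOptions, so a configuration can be built fluently.
// The broker URL and client id below are placeholders.
//
//	opts := mqtt.NewClientOptions().
//		AddBroker("tcp://localhost:1883").
//		SetClientID("example-client").
//		SetCleanSession(true).
//		SetKeepAlive(30 * time.Second).
//		SetConnectTimeout(5 * time.Second)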
+func (o *ClientOptions) SetConnectTimeout(t time.Duration) *ClientOptions { + o.ConnectTimeout = t + return o +} + +// SetMaxReconnectInterval sets the maximum time that will be waited between reconnection attempts +// when connection is lost +func (o *ClientOptions) SetMaxReconnectInterval(t time.Duration) *ClientOptions { + o.MaxReconnectInterval = t + return o +} + +// SetAutoReconnect sets whether the automatic reconnection logic should be used +// when the connection is lost, even if disabled the ConnectionLostHandler is still +// called +func (o *ClientOptions) SetAutoReconnect(a bool) *ClientOptions { + o.AutoReconnect = a + return o +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/connack.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/connack.go new file mode 100644 index 000000000..729133767 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/connack.go @@ -0,0 +1,57 @@ +package packets + +import ( + "bytes" + "fmt" + "github.com/pborman/uuid" + "io" +) + +//ConnackPacket is an internal representation of the fields of the +//Connack MQTT packet +type ConnackPacket struct { + FixedHeader + TopicNameCompression byte + ReturnCode byte + uuid uuid.UUID +} + +func (ca *ConnackPacket) String() string { + str := fmt.Sprintf("%s\n", ca.FixedHeader) + str += fmt.Sprintf("returncode: %d", ca.ReturnCode) + return str +} + +func (ca *ConnackPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.WriteByte(ca.TopicNameCompression) + body.WriteByte(ca.ReturnCode) + ca.FixedHeader.RemainingLength = 2 + packet := ca.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (ca *ConnackPacket) Unpack(b io.Reader) { + ca.TopicNameCompression = decodeByte(b) + ca.ReturnCode = decodeByte(b) +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (ca *ConnackPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} + +//UUID returns the unique ID assigned to the ControlPacket when +//it was originally received. 
Note: this is not related to the +//MessageID field for MQTT packets +func (ca *ConnackPacket) UUID() uuid.UUID { + return ca.uuid +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/connect.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/connect.go new file mode 100644 index 000000000..283007725 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/connect.go @@ -0,0 +1,128 @@ +package packets + +import ( + "bytes" + "fmt" + "github.com/pborman/uuid" + "io" +) + +//ConnectPacket is an internal representation of the fields of the +//Connect MQTT packet +type ConnectPacket struct { + FixedHeader + ProtocolName string + ProtocolVersion byte + CleanSession bool + WillFlag bool + WillQos byte + WillRetain bool + UsernameFlag bool + PasswordFlag bool + ReservedBit byte + KeepaliveTimer uint16 + + ClientIdentifier string + WillTopic string + WillMessage []byte + Username string + Password []byte + uuid uuid.UUID +} + +func (c *ConnectPacket) String() string { + str := fmt.Sprintf("%s\n", c.FixedHeader) + str += fmt.Sprintf("protocolversion: %d protocolname: %s cleansession: %t willflag: %t WillQos: %d WillRetain: %t Usernameflag: %t Passwordflag: %t keepalivetimer: %d\nclientId: %s\nwilltopic: %s\nwillmessage: %s\nUsername: %s\nPassword: %s\n", c.ProtocolVersion, c.ProtocolName, c.CleanSession, c.WillFlag, c.WillQos, c.WillRetain, c.UsernameFlag, c.PasswordFlag, c.KeepaliveTimer, c.ClientIdentifier, c.WillTopic, c.WillMessage, c.Username, c.Password) + return str +} + +func (c *ConnectPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.Write(encodeString(c.ProtocolName)) + body.WriteByte(c.ProtocolVersion) + body.WriteByte(boolToByte(c.CleanSession)<<1 | boolToByte(c.WillFlag)<<2 | c.WillQos<<3 | boolToByte(c.WillRetain)<<5 | boolToByte(c.PasswordFlag)<<6 | boolToByte(c.UsernameFlag)<<7) + body.Write(encodeUint16(c.KeepaliveTimer)) + body.Write(encodeString(c.ClientIdentifier)) + if c.WillFlag { + body.Write(encodeString(c.WillTopic)) + body.Write(encodeBytes(c.WillMessage)) + } + if c.UsernameFlag { + body.Write(encodeString(c.Username)) + } + if c.PasswordFlag { + body.Write(encodeBytes(c.Password)) + } + c.FixedHeader.RemainingLength = body.Len() + packet := c.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (c *ConnectPacket) Unpack(b io.Reader) { + c.ProtocolName = decodeString(b) + c.ProtocolVersion = decodeByte(b) + options := decodeByte(b) + c.ReservedBit = 1 & options + c.CleanSession = 1&(options>>1) > 0 + c.WillFlag = 1&(options>>2) > 0 + c.WillQos = 3 & (options >> 3) + c.WillRetain = 1&(options>>5) > 0 + c.PasswordFlag = 1&(options>>6) > 0 + c.UsernameFlag = 1&(options>>7) > 0 + c.KeepaliveTimer = decodeUint16(b) + c.ClientIdentifier = decodeString(b) + if c.WillFlag { + c.WillTopic = decodeString(b) + c.WillMessage = decodeBytes(b) + } + if c.UsernameFlag { + c.Username = decodeString(b) + } + if c.PasswordFlag { + c.Password = decodeBytes(b) + } +} + +//Validate performs validation of the fields of a Connect packet +func (c *ConnectPacket) Validate() byte { + if c.PasswordFlag && !c.UsernameFlag { + return ErrRefusedBadUsernameOrPassword + } + if c.ReservedBit != 0 { + //Bad reserved bit + return ErrProtocolViolation + } + if (c.ProtocolName == 
"MQIsdp" && c.ProtocolVersion != 3) || (c.ProtocolName == "MQTT" && c.ProtocolVersion != 4) { + //Mismatched or unsupported protocol version + return ErrRefusedBadProtocolVersion + } + if c.ProtocolName != "MQIsdp" && c.ProtocolName != "MQTT" { + //Bad protocol name + return ErrProtocolViolation + } + if len(c.ClientIdentifier) > 65535 || len(c.Username) > 65535 || len(c.Password) > 65535 { + //Bad size field + return ErrProtocolViolation + } + return Accepted +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (c *ConnectPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} + +//UUID returns the unique ID assigned to the ControlPacket when +//it was originally received. Note: this is not related to the +//MessageID field for MQTT packets +func (c *ConnectPacket) UUID() uuid.UUID { + return c.uuid +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/disconnect.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/disconnect.go new file mode 100644 index 000000000..2f005fb35 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/disconnect.go @@ -0,0 +1,44 @@ +package packets + +import ( + "fmt" + "github.com/pborman/uuid" + "io" +) + +//DisconnectPacket is an internal representation of the fields of the +//Disconnect MQTT packet +type DisconnectPacket struct { + FixedHeader + uuid uuid.UUID +} + +func (d *DisconnectPacket) String() string { + str := fmt.Sprintf("%s\n", d.FixedHeader) + return str +} + +func (d *DisconnectPacket) Write(w io.Writer) error { + packet := d.FixedHeader.pack() + _, err := packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (d *DisconnectPacket) Unpack(b io.Reader) { +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (d *DisconnectPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} + +//UUID returns the unique ID assigned to the ControlPacket when +//it was originally received. Note: this is not related to the +//MessageID field for MQTT packets +func (d *DisconnectPacket) UUID() uuid.UUID { + return d.uuid +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/packets.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/packets.go new file mode 100644 index 000000000..4cdf3b1f5 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/packets.go @@ -0,0 +1,324 @@ +package packets + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "github.com/pborman/uuid" + "io" +) + +//ControlPacket defines the interface for structs intended to hold +//decoded MQTT packets, either from being read or before being +//written +type ControlPacket interface { + Write(io.Writer) error + Unpack(io.Reader) + String() string + Details() Details + UUID() uuid.UUID +} + +//PacketNames maps the constants for each of the MQTT packet types +//to a string representation of their name. 
+var PacketNames = map[uint8]string{ + 1: "CONNECT", + 2: "CONNACK", + 3: "PUBLISH", + 4: "PUBACK", + 5: "PUBREC", + 6: "PUBREL", + 7: "PUBCOMP", + 8: "SUBSCRIBE", + 9: "SUBACK", + 10: "UNSUBSCRIBE", + 11: "UNSUBACK", + 12: "PINGREQ", + 13: "PINGRESP", + 14: "DISCONNECT", +} + +//Below are the constants assigned to each of the MQTT packet types +const ( + Connect = 1 + Connack = 2 + Publish = 3 + Puback = 4 + Pubrec = 5 + Pubrel = 6 + Pubcomp = 7 + Subscribe = 8 + Suback = 9 + Unsubscribe = 10 + Unsuback = 11 + Pingreq = 12 + Pingresp = 13 + Disconnect = 14 +) + +//Below are the const definitions for error codes returned by +//Connect() +const ( + Accepted = 0x00 + ErrRefusedBadProtocolVersion = 0x01 + ErrRefusedIDRejected = 0x02 + ErrRefusedServerUnavailable = 0x03 + ErrRefusedBadUsernameOrPassword = 0x04 + ErrRefusedNotAuthorised = 0x05 + ErrNetworkError = 0xFE + ErrProtocolViolation = 0xFF +) + +//ConnackReturnCodes is a map of the error codes constants for Connect() +//to a string representation of the error +var ConnackReturnCodes = map[uint8]string{ + 0: "Connection Accepted", + 1: "Connection Refused: Bad Protocol Version", + 2: "Connection Refused: Client Identifier Rejected", + 3: "Connection Refused: Server Unavailable", + 4: "Connection Refused: Username or Password in unknown format", + 5: "Connection Refused: Not Authorised", + 254: "Connection Error", + 255: "Connection Refused: Protocol Violation", +} + +//ConnErrors is a map of the errors codes constants for Connect() +//to a Go error +var ConnErrors = map[byte]error{ + Accepted: nil, + ErrRefusedBadProtocolVersion: errors.New("Unnacceptable protocol version"), + ErrRefusedIDRejected: errors.New("Identifier rejected"), + ErrRefusedServerUnavailable: errors.New("Server Unavailable"), + ErrRefusedBadUsernameOrPassword: errors.New("Bad user name or password"), + ErrRefusedNotAuthorised: errors.New("Not Authorized"), + ErrNetworkError: errors.New("Network Error"), + ErrProtocolViolation: errors.New("Protocol Violation"), +} + +//ReadPacket takes an instance of an io.Reader (such as net.Conn) and attempts +//to read an MQTT packet from the stream. It returns a ControlPacket +//representing the decoded MQTT packet and an error. One of these returns will +//always be nil, a nil ControlPacket indicating an error occurred. +func ReadPacket(r io.Reader) (cp ControlPacket, err error) { + var fh FixedHeader + b := make([]byte, 1) + + _, err = io.ReadFull(r, b) + if err != nil { + return nil, err + } + fh.unpack(b[0], r) + cp = NewControlPacketWithHeader(fh) + if cp == nil { + return nil, errors.New("Bad data from client") + } + packetBytes := make([]byte, fh.RemainingLength) + _, err = io.ReadFull(r, packetBytes) + if err != nil { + return nil, err + } + cp.Unpack(bytes.NewBuffer(packetBytes)) + return cp, nil +} + +//NewControlPacket is used to create a new ControlPacket of the type specified +//by packetType, this is usually done by reference to the packet type constants +//defined in packets.go. The newly created ControlPacket is empty and a pointer +//is returned. 
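// Editorial sketch, not vendored code: a round trip through ReadPacket,
// mirroring the pattern in packets_test.go. Any io.Reader works; a
// bytes.Buffer stands in for a net.Conn here.
//
//	var buf bytes.Buffer
//	pub := NewControlPacket(Publish).(*PublishPacket)
//	pub.TopicName, pub.Payload = "go-mqtt/sample", []byte("hello")
//	pub.Write(&buf)
//	cp, err := ReadPacket(&buf) // cp is a *PublishPacket again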
+func NewControlPacket(packetType byte) (cp ControlPacket) { + switch packetType { + case Connect: + cp = &ConnectPacket{FixedHeader: FixedHeader{MessageType: Connect}, uuid: uuid.NewUUID()} + case Connack: + cp = &ConnackPacket{FixedHeader: FixedHeader{MessageType: Connack}, uuid: uuid.NewUUID()} + case Disconnect: + cp = &DisconnectPacket{FixedHeader: FixedHeader{MessageType: Disconnect}, uuid: uuid.NewUUID()} + case Publish: + cp = &PublishPacket{FixedHeader: FixedHeader{MessageType: Publish}, uuid: uuid.NewUUID()} + case Puback: + cp = &PubackPacket{FixedHeader: FixedHeader{MessageType: Puback}, uuid: uuid.NewUUID()} + case Pubrec: + cp = &PubrecPacket{FixedHeader: FixedHeader{MessageType: Pubrec}, uuid: uuid.NewUUID()} + case Pubrel: + cp = &PubrelPacket{FixedHeader: FixedHeader{MessageType: Pubrel, Qos: 1}, uuid: uuid.NewUUID()} + case Pubcomp: + cp = &PubcompPacket{FixedHeader: FixedHeader{MessageType: Pubcomp}, uuid: uuid.NewUUID()} + case Subscribe: + cp = &SubscribePacket{FixedHeader: FixedHeader{MessageType: Subscribe, Qos: 1}, uuid: uuid.NewUUID()} + case Suback: + cp = &SubackPacket{FixedHeader: FixedHeader{MessageType: Suback}, uuid: uuid.NewUUID()} + case Unsubscribe: + cp = &UnsubscribePacket{FixedHeader: FixedHeader{MessageType: Unsubscribe, Qos: 1}, uuid: uuid.NewUUID()} + case Unsuback: + cp = &UnsubackPacket{FixedHeader: FixedHeader{MessageType: Unsuback}, uuid: uuid.NewUUID()} + case Pingreq: + cp = &PingreqPacket{FixedHeader: FixedHeader{MessageType: Pingreq}, uuid: uuid.NewUUID()} + case Pingresp: + cp = &PingrespPacket{FixedHeader: FixedHeader{MessageType: Pingresp}, uuid: uuid.NewUUID()} + default: + return nil + } + return cp +} + +//NewControlPacketWithHeader is used to create a new ControlPacket of the type +//specified within the FixedHeader that is passed to the function. +//The newly created ControlPacket is empty and a pointer is returned. 
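// Editorial aside, not vendored code: pack and unpack below fold the packet
// type and flags into a single byte, type<<4 | dup<<3 | qos<<1 | retain.
// For example, a duplicate QoS 1 PUBLISH:
//
//	fh := FixedHeader{MessageType: Publish, Dup: true, Qos: 1}
//	// fh.pack() begins with 0x3A = 0011(PUBLISH) 1(dup) 01(qos) 0(retain)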
+func NewControlPacketWithHeader(fh FixedHeader) (cp ControlPacket) { + switch fh.MessageType { + case Connect: + cp = &ConnectPacket{FixedHeader: fh, uuid: uuid.NewUUID()} + case Connack: + cp = &ConnackPacket{FixedHeader: fh, uuid: uuid.NewUUID()} + case Disconnect: + cp = &DisconnectPacket{FixedHeader: fh, uuid: uuid.NewUUID()} + case Publish: + cp = &PublishPacket{FixedHeader: fh, uuid: uuid.NewUUID()} + case Puback: + cp = &PubackPacket{FixedHeader: fh, uuid: uuid.NewUUID()} + case Pubrec: + cp = &PubrecPacket{FixedHeader: fh, uuid: uuid.NewUUID()} + case Pubrel: + cp = &PubrelPacket{FixedHeader: fh, uuid: uuid.NewUUID()} + case Pubcomp: + cp = &PubcompPacket{FixedHeader: fh, uuid: uuid.NewUUID()} + case Subscribe: + cp = &SubscribePacket{FixedHeader: fh, uuid: uuid.NewUUID()} + case Suback: + cp = &SubackPacket{FixedHeader: fh, uuid: uuid.NewUUID()} + case Unsubscribe: + cp = &UnsubscribePacket{FixedHeader: fh, uuid: uuid.NewUUID()} + case Unsuback: + cp = &UnsubackPacket{FixedHeader: fh, uuid: uuid.NewUUID()} + case Pingreq: + cp = &PingreqPacket{FixedHeader: fh, uuid: uuid.NewUUID()} + case Pingresp: + cp = &PingrespPacket{FixedHeader: fh, uuid: uuid.NewUUID()} + default: + return nil + } + return cp +} + +//Details struct returned by the Details() function called on +//ControlPackets to present details of the Qos and MessageID +//of the ControlPacket +type Details struct { + Qos byte + MessageID uint16 +} + +//FixedHeader is a struct to hold the decoded information from +//the fixed header of an MQTT ControlPacket +type FixedHeader struct { + MessageType byte + Dup bool + Qos byte + Retain bool + RemainingLength int +} + +func (fh FixedHeader) String() string { + return fmt.Sprintf("%s: dup: %t qos: %d retain: %t rLength: %d", PacketNames[fh.MessageType], fh.Dup, fh.Qos, fh.Retain, fh.RemainingLength) +} + +func boolToByte(b bool) byte { + switch b { + case true: + return 1 + default: + return 0 + } +} + +func (fh *FixedHeader) pack() bytes.Buffer { + var header bytes.Buffer + header.WriteByte(fh.MessageType<<4 | boolToByte(fh.Dup)<<3 | fh.Qos<<1 | boolToByte(fh.Retain)) + header.Write(encodeLength(fh.RemainingLength)) + return header +} + +func (fh *FixedHeader) unpack(typeAndFlags byte, r io.Reader) { + fh.MessageType = typeAndFlags >> 4 + fh.Dup = (typeAndFlags>>3)&0x01 > 0 + fh.Qos = (typeAndFlags >> 1) & 0x03 + fh.Retain = typeAndFlags&0x01 > 0 + fh.RemainingLength = decodeLength(r) +} + +func decodeByte(b io.Reader) byte { + num := make([]byte, 1) + b.Read(num) + return num[0] +} + +func decodeUint16(b io.Reader) uint16 { + num := make([]byte, 2) + b.Read(num) + return binary.BigEndian.Uint16(num) +} + +func encodeUint16(num uint16) []byte { + bytes := make([]byte, 2) + binary.BigEndian.PutUint16(bytes, num) + return bytes +} + +func encodeString(field string) []byte { + fieldLength := make([]byte, 2) + binary.BigEndian.PutUint16(fieldLength, uint16(len(field))) + return append(fieldLength, []byte(field)...) +} + +func decodeString(b io.Reader) string { + fieldLength := decodeUint16(b) + field := make([]byte, fieldLength) + b.Read(field) + return string(field) +} + +func decodeBytes(b io.Reader) []byte { + fieldLength := decodeUint16(b) + field := make([]byte, fieldLength) + b.Read(field) + return field +} + +func encodeBytes(field []byte) []byte { + fieldLength := make([]byte, 2) + binary.BigEndian.PutUint16(fieldLength, uint16(len(field))) + return append(fieldLength, field...) 
+} + +func encodeLength(length int) []byte { + var encLength []byte + for { + digit := byte(length % 128) + length /= 128 + if length > 0 { + digit |= 0x80 + } + encLength = append(encLength, digit) + if length == 0 { + break + } + } + return encLength +} + +func decodeLength(r io.Reader) int { + var rLength uint32 + var multiplier uint32 + b := make([]byte, 1) + for { + io.ReadFull(r, b) + digit := b[0] + rLength |= uint32(digit&127) << multiplier + if (digit & 128) == 0 { + break + } + multiplier += 7 + } + return int(rLength) +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/packets_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/packets_test.go new file mode 100644 index 000000000..51d887d08 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/packets_test.go @@ -0,0 +1,159 @@ +package packets + +import ( + "bytes" + "testing" +) + +func TestPacketNames(t *testing.T) { + if PacketNames[1] != "CONNECT" { + t.Errorf("PacketNames[1] is %s, should be %s", PacketNames[1], "CONNECT") + } + if PacketNames[2] != "CONNACK" { + t.Errorf("PacketNames[2] is %s, should be %s", PacketNames[2], "CONNACK") + } + if PacketNames[3] != "PUBLISH" { + t.Errorf("PacketNames[3] is %s, should be %s", PacketNames[3], "PUBLISH") + } + if PacketNames[4] != "PUBACK" { + t.Errorf("PacketNames[4] is %s, should be %s", PacketNames[4], "PUBACK") + } + if PacketNames[5] != "PUBREC" { + t.Errorf("PacketNames[5] is %s, should be %s", PacketNames[5], "PUBREC") + } + if PacketNames[6] != "PUBREL" { + t.Errorf("PacketNames[6] is %s, should be %s", PacketNames[6], "PUBREL") + } + if PacketNames[7] != "PUBCOMP" { + t.Errorf("PacketNames[7] is %s, should be %s", PacketNames[7], "PUBCOMP") + } + if PacketNames[8] != "SUBSCRIBE" { + t.Errorf("PacketNames[8] is %s, should be %s", PacketNames[8], "SUBSCRIBE") + } + if PacketNames[9] != "SUBACK" { + t.Errorf("PacketNames[9] is %s, should be %s", PacketNames[9], "SUBACK") + } + if PacketNames[10] != "UNSUBSCRIBE" { + t.Errorf("PacketNames[10] is %s, should be %s", PacketNames[10], "UNSUBSCRIBE") + } + if PacketNames[11] != "UNSUBACK" { + t.Errorf("PacketNames[11] is %s, should be %s", PacketNames[11], "UNSUBACK") + } + if PacketNames[12] != "PINGREQ" { + t.Errorf("PacketNames[12] is %s, should be %s", PacketNames[12], "PINGREQ") + } + if PacketNames[13] != "PINGRESP" { + t.Errorf("PacketNames[13] is %s, should be %s", PacketNames[13], "PINGRESP") + } + if PacketNames[14] != "DISCONNECT" { + t.Errorf("PacketNames[14] is %s, should be %s", PacketNames[14], "DISCONNECT") + } +} + +func TestPacketConsts(t *testing.T) { + if Connect != 1 { + t.Errorf("Const for Connect is %d, should be %d", Connect, 1) + } + if Connack != 2 { + t.Errorf("Const for Connack is %d, should be %d", Connack, 2) + } + if Publish != 3 { + t.Errorf("Const for Publish is %d, should be %d", Publish, 3) + } + if Puback != 4 { + t.Errorf("Const for Puback is %d, should be %d", Puback, 4) + } + if Pubrec != 5 { + t.Errorf("Const for Pubrec is %d, should be %d", Pubrec, 5) + } + if Pubrel != 6 { + t.Errorf("Const for Pubrel is %d, should be %d", Pubrel, 6) + } + if Pubcomp != 7 { + t.Errorf("Const for Pubcomp is %d, should be %d", Pubcomp, 7) + } + if Subscribe != 8 { + t.Errorf("Const for Subscribe is %d, should be %d", Subscribe, 8) + } + if Suback != 9 { + t.Errorf("Const for Suback is %d, should be %d", Suback, 9) + } + if Unsubscribe != 10 { + 
t.Errorf("Const for Unsubscribe is %d, should be %d", Unsubscribe, 10) + } + if Unsuback != 11 { + t.Errorf("Const for Unsuback is %d, should be %d", Unsuback, 11) + } + if Pingreq != 12 { + t.Errorf("Const for Pingreq is %d, should be %d", Pingreq, 12) + } + if Pingresp != 13 { + t.Errorf("Const for Pingresp is %d, should be %d", Pingresp, 13) + } + if Disconnect != 14 { + t.Errorf("Const for Disconnect is %d, should be %d", Disconnect, 14) + } +} + +func TestConnackConsts(t *testing.T) { + if Accepted != 0x00 { + t.Errorf("Const for Accepted is %d, should be %d", Accepted, 0) + } + if ErrRefusedBadProtocolVersion != 0x01 { + t.Errorf("Const for RefusedBadProtocolVersion is %d, should be %d", ErrRefusedBadProtocolVersion, 1) + } + if ErrRefusedIDRejected != 0x02 { + t.Errorf("Const for RefusedIDRejected is %d, should be %d", ErrRefusedIDRejected, 2) + } + if ErrRefusedServerUnavailable != 0x03 { + t.Errorf("Const for RefusedServerUnavailable is %d, should be %d", ErrRefusedServerUnavailable, 3) + } + if ErrRefusedBadUsernameOrPassword != 0x04 { + t.Errorf("Const for RefusedBadUsernameOrPassword is %d, should be %d", ErrRefusedBadUsernameOrPassword, 4) + } + if ErrRefusedNotAuthorised != 0x05 { + t.Errorf("Const for RefusedNotAuthorised is %d, should be %d", ErrRefusedNotAuthorised, 5) + } +} + +func TestConnectPacket(t *testing.T) { + connectPacketBytes := bytes.NewBuffer([]byte{16, 52, 0, 4, 77, 81, 84, 84, 4, 204, 0, 0, 0, 0, 0, 4, 116, 101, 115, 116, 0, 12, 84, 101, 115, 116, 32, 80, 97, 121, 108, 111, 97, 100, 0, 8, 116, 101, 115, 116, 117, 115, 101, 114, 0, 8, 116, 101, 115, 116, 112, 97, 115, 115}) + packet, err := ReadPacket(connectPacketBytes) + if err != nil { + t.Fatalf("Error reading packet: %s", err.Error()) + } + cp := packet.(*ConnectPacket) + if cp.ProtocolName != "MQTT" { + t.Errorf("Connect Packet ProtocolName is %s, should be %s", cp.ProtocolName, "MQTT") + } + if cp.ProtocolVersion != 4 { + t.Errorf("Connect Packet ProtocolVersion is %d, should be %d", cp.ProtocolVersion, 4) + } + if cp.UsernameFlag != true { + t.Errorf("Connect Packet UsernameFlag is %t, should be %t", cp.UsernameFlag, true) + } + if cp.Username != "testuser" { + t.Errorf("Connect Packet Username is %s, should be %s", cp.Username, "testuser") + } + if cp.PasswordFlag != true { + t.Errorf("Connect Packet PasswordFlag is %t, should be %t", cp.PasswordFlag, true) + } + if string(cp.Password) != "testpass" { + t.Errorf("Connect Packet Password is %s, should be %s", string(cp.Password), "testpass") + } + if cp.WillFlag != true { + t.Errorf("Connect Packet WillFlag is %t, should be %t", cp.WillFlag, true) + } + if cp.WillTopic != "test" { + t.Errorf("Connect Packet WillTopic is %s, should be %s", cp.WillTopic, "test") + } + if cp.WillQos != 1 { + t.Errorf("Connect Packet WillQos is %d, should be %d", cp.WillQos, 1) + } + if cp.WillRetain != false { + t.Errorf("Connect Packet WillRetain is %t, should be %t", cp.WillRetain, false) + } + if string(cp.WillMessage) != "Test Payload" { + t.Errorf("Connect Packet WillMessage is %s, should be %s", string(cp.WillMessage), "Test Payload") + } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pingreq.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pingreq.go new file mode 100644 index 000000000..216a5f8fc --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pingreq.go @@ -0,0 +1,44 @@ +package packets + 
+import ( + "fmt" + "github.com/pborman/uuid" + "io" +) + +//PingreqPacket is an internal representation of the fields of the +//Pingreq MQTT packet +type PingreqPacket struct { + FixedHeader + uuid uuid.UUID +} + +func (pr *PingreqPacket) String() string { + str := fmt.Sprintf("%s", pr.FixedHeader) + return str +} + +func (pr *PingreqPacket) Write(w io.Writer) error { + packet := pr.FixedHeader.pack() + _, err := packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PingreqPacket) Unpack(b io.Reader) { +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PingreqPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} + +//UUID returns the unique ID assigned to the ControlPacket when +//it was originally received. Note: this is not related to the +//MessageID field for MQTT packets +func (pr *PingreqPacket) UUID() uuid.UUID { + return pr.uuid +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pingresp.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pingresp.go new file mode 100644 index 000000000..4658def97 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pingresp.go @@ -0,0 +1,44 @@ +package packets + +import ( + "fmt" + "github.com/pborman/uuid" + "io" +) + +//PingrespPacket is an internal representation of the fields of the +//Pingresp MQTT packet +type PingrespPacket struct { + FixedHeader + uuid uuid.UUID +} + +func (pr *PingrespPacket) String() string { + str := fmt.Sprintf("%s", pr.FixedHeader) + return str +} + +func (pr *PingrespPacket) Write(w io.Writer) error { + packet := pr.FixedHeader.pack() + _, err := packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PingrespPacket) Unpack(b io.Reader) { +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PingrespPacket) Details() Details { + return Details{Qos: 0, MessageID: 0} +} + +//UUID returns the unique ID assigned to the ControlPacket when +//it was originally received. 
Note: this is not related to the +//MessageID field for MQTT packets +func (pr *PingrespPacket) UUID() uuid.UUID { + return pr.uuid +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/puback.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/puback.go new file mode 100644 index 000000000..a3fe5db31 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/puback.go @@ -0,0 +1,50 @@ +package packets + +import ( + "fmt" + "github.com/pborman/uuid" + "io" +) + +//PubackPacket is an internal representation of the fields of the +//Puback MQTT packet +type PubackPacket struct { + FixedHeader + MessageID uint16 + uuid uuid.UUID +} + +func (pa *PubackPacket) String() string { + str := fmt.Sprintf("%s\n", pa.FixedHeader) + str += fmt.Sprintf("messageID: %d", pa.MessageID) + return str +} + +func (pa *PubackPacket) Write(w io.Writer) error { + var err error + pa.FixedHeader.RemainingLength = 2 + packet := pa.FixedHeader.pack() + packet.Write(encodeUint16(pa.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pa *PubackPacket) Unpack(b io.Reader) { + pa.MessageID = decodeUint16(b) +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pa *PubackPacket) Details() Details { + return Details{Qos: pa.Qos, MessageID: pa.MessageID} +} + +//UUID returns the unique ID assigned to the ControlPacket when +//it was originally received. Note: this is not related to the +//MessageID field for MQTT packets +func (pa *PubackPacket) UUID() uuid.UUID { + return pa.uuid +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubcomp.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubcomp.go new file mode 100644 index 000000000..0cd5c860c --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubcomp.go @@ -0,0 +1,50 @@ +package packets + +import ( + "fmt" + "github.com/pborman/uuid" + "io" +) + +//PubcompPacket is an internal representation of the fields of the +//Pubcomp MQTT packet +type PubcompPacket struct { + FixedHeader + MessageID uint16 + uuid uuid.UUID +} + +func (pc *PubcompPacket) String() string { + str := fmt.Sprintf("%s\n", pc.FixedHeader) + str += fmt.Sprintf("MessageID: %d", pc.MessageID) + return str +} + +func (pc *PubcompPacket) Write(w io.Writer) error { + var err error + pc.FixedHeader.RemainingLength = 2 + packet := pc.FixedHeader.pack() + packet.Write(encodeUint16(pc.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pc *PubcompPacket) Unpack(b io.Reader) { + pc.MessageID = decodeUint16(b) +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pc *PubcompPacket) Details() Details { + return Details{Qos: pc.Qos, MessageID: pc.MessageID} +} + +//UUID returns the unique ID assigned to the ControlPacket when +//it was originally received. 
Note: this is not related to the +//MessageID field for MQTT packets +func (pc *PubcompPacket) UUID() uuid.UUID { + return pc.uuid +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/publish.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/publish.go new file mode 100644 index 000000000..cb886cf78 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/publish.go @@ -0,0 +1,82 @@ +package packets + +import ( + "bytes" + "fmt" + "github.com/pborman/uuid" + "io" +) + +//PublishPacket is an internal representation of the fields of the +//Publish MQTT packet +type PublishPacket struct { + FixedHeader + TopicName string + MessageID uint16 + Payload []byte + uuid uuid.UUID +} + +func (p *PublishPacket) String() string { + str := fmt.Sprintf("%s\n", p.FixedHeader) + str += fmt.Sprintf("topicName: %s MessageID: %d\n", p.TopicName, p.MessageID) + str += fmt.Sprintf("payload: %s\n", string(p.Payload)) + return str +} + +func (p *PublishPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.Write(encodeString(p.TopicName)) + if p.Qos > 0 { + body.Write(encodeUint16(p.MessageID)) + } + p.FixedHeader.RemainingLength = body.Len() + len(p.Payload) + packet := p.FixedHeader.pack() + packet.Write(body.Bytes()) + packet.Write(p.Payload) + _, err = w.Write(packet.Bytes()) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (p *PublishPacket) Unpack(b io.Reader) { + var payloadLength = p.FixedHeader.RemainingLength + p.TopicName = decodeString(b) + if p.Qos > 0 { + p.MessageID = decodeUint16(b) + payloadLength -= len(p.TopicName) + 4 + } else { + payloadLength -= len(p.TopicName) + 2 + } + p.Payload = make([]byte, payloadLength) + b.Read(p.Payload) +} + +//Copy creates a new PublishPacket with the same topic and payload +//but an empty fixed header, useful for when you want to deliver +//a message with different properties such as Qos but the same +//content +func (p *PublishPacket) Copy() *PublishPacket { + newP := NewControlPacket(Publish).(*PublishPacket) + newP.TopicName = p.TopicName + newP.Payload = p.Payload + + return newP +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (p *PublishPacket) Details() Details { + return Details{Qos: p.Qos, MessageID: p.MessageID} +} + +//UUID returns the unique ID assigned to the ControlPacket when +//it was originally received. 
Note: this is not related to the +//MessageID field for MQTT packets +func (p *PublishPacket) UUID() uuid.UUID { + return p.uuid +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubrec.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubrec.go new file mode 100644 index 000000000..b83914bc9 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubrec.go @@ -0,0 +1,50 @@ +package packets + +import ( + "fmt" + "github.com/pborman/uuid" + "io" +) + +//PubrecPacket is an internal representation of the fields of the +//Pubrec MQTT packet +type PubrecPacket struct { + FixedHeader + MessageID uint16 + uuid uuid.UUID +} + +func (pr *PubrecPacket) String() string { + str := fmt.Sprintf("%s\n", pr.FixedHeader) + str += fmt.Sprintf("MessageID: %d", pr.MessageID) + return str +} + +func (pr *PubrecPacket) Write(w io.Writer) error { + var err error + pr.FixedHeader.RemainingLength = 2 + packet := pr.FixedHeader.pack() + packet.Write(encodeUint16(pr.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PubrecPacket) Unpack(b io.Reader) { + pr.MessageID = decodeUint16(b) +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PubrecPacket) Details() Details { + return Details{Qos: pr.Qos, MessageID: pr.MessageID} +} + +//UUID returns the unique ID assigned to the ControlPacket when +//it was originally received. Note: this is not related to the +//MessageID field for MQTT packets +func (pr *PubrecPacket) UUID() uuid.UUID { + return pr.uuid +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubrel.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubrel.go new file mode 100644 index 000000000..14bfffdda --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/pubrel.go @@ -0,0 +1,50 @@ +package packets + +import ( + "fmt" + "github.com/pborman/uuid" + "io" +) + +//PubrelPacket is an internal representation of the fields of the +//Pubrel MQTT packet +type PubrelPacket struct { + FixedHeader + MessageID uint16 + uuid uuid.UUID +} + +func (pr *PubrelPacket) String() string { + str := fmt.Sprintf("%s\n", pr.FixedHeader) + str += fmt.Sprintf("MessageID: %d", pr.MessageID) + return str +} + +func (pr *PubrelPacket) Write(w io.Writer) error { + var err error + pr.FixedHeader.RemainingLength = 2 + packet := pr.FixedHeader.pack() + packet.Write(encodeUint16(pr.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (pr *PubrelPacket) Unpack(b io.Reader) { + pr.MessageID = decodeUint16(b) +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (pr *PubrelPacket) Details() Details { + return Details{Qos: pr.Qos, MessageID: pr.MessageID} +} + +//UUID returns the unique ID assigned to the ControlPacket when +//it was originally received. 
Note: this is not related to the +//MessageID field for MQTT packets +func (pr *PubrelPacket) UUID() uuid.UUID { + return pr.uuid +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/suback.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/suback.go new file mode 100644 index 000000000..0bd3b665d --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/suback.go @@ -0,0 +1,58 @@ +package packets + +import ( + "bytes" + "fmt" + "github.com/pborman/uuid" + "io" +) + +//SubackPacket is an internal representation of the fields of the +//Suback MQTT packet +type SubackPacket struct { + FixedHeader + MessageID uint16 + GrantedQoss []byte + uuid uuid.UUID +} + +func (sa *SubackPacket) String() string { + str := fmt.Sprintf("%s\n", sa.FixedHeader) + str += fmt.Sprintf("MessageID: %d", sa.MessageID) + return str +} + +func (sa *SubackPacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + body.Write(encodeUint16(sa.MessageID)) + body.Write(sa.GrantedQoss) + sa.FixedHeader.RemainingLength = body.Len() + packet := sa.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (sa *SubackPacket) Unpack(b io.Reader) { + var qosBuffer bytes.Buffer + sa.MessageID = decodeUint16(b) + qosBuffer.ReadFrom(b) + sa.GrantedQoss = qosBuffer.Bytes() +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (sa *SubackPacket) Details() Details { + return Details{Qos: 0, MessageID: sa.MessageID} +} + +//UUID returns the unique ID assigned to the ControlPacket when +//it was originally received. 
Note: this is not related to the +//MessageID field for MQTT packets +func (sa *SubackPacket) UUID() uuid.UUID { + return sa.uuid +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/subscribe.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/subscribe.go new file mode 100644 index 000000000..0aff19aac --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/subscribe.go @@ -0,0 +1,68 @@ +package packets + +import ( + "bytes" + "fmt" + "github.com/pborman/uuid" + "io" +) + +//SubscribePacket is an internal representation of the fields of the +//Subscribe MQTT packet +type SubscribePacket struct { + FixedHeader + MessageID uint16 + Topics []string + Qoss []byte + uuid uuid.UUID +} + +func (s *SubscribePacket) String() string { + str := fmt.Sprintf("%s\n", s.FixedHeader) + str += fmt.Sprintf("MessageID: %d topics: %s", s.MessageID, s.Topics) + return str +} + +func (s *SubscribePacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + + body.Write(encodeUint16(s.MessageID)) + for i, topic := range s.Topics { + body.Write(encodeString(topic)) + body.WriteByte(s.Qoss[i]) + } + s.FixedHeader.RemainingLength = body.Len() + packet := s.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (s *SubscribePacket) Unpack(b io.Reader) { + s.MessageID = decodeUint16(b) + payloadLength := s.FixedHeader.RemainingLength - 2 + for payloadLength > 0 { + topic := decodeString(b) + s.Topics = append(s.Topics, topic) + qos := decodeByte(b) + s.Qoss = append(s.Qoss, qos) + payloadLength -= 2 + len(topic) + 1 //2 bytes of string length, plus string, plus 1 byte for Qos + } +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (s *SubscribePacket) Details() Details { + return Details{Qos: 1, MessageID: s.MessageID} +} + +//UUID returns the unique ID assigned to the ControlPacket when +//it was originally received. 
Note: this is not related to the +//MessageID field for MQTT packets +func (s *SubscribePacket) UUID() uuid.UUID { + return s.uuid +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/unsuback.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/unsuback.go new file mode 100644 index 000000000..ef67734f2 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/unsuback.go @@ -0,0 +1,50 @@ +package packets + +import ( + "fmt" + "github.com/pborman/uuid" + "io" +) + +//UnsubackPacket is an internal representation of the fields of the +//Unsuback MQTT packet +type UnsubackPacket struct { + FixedHeader + MessageID uint16 + uuid uuid.UUID +} + +func (ua *UnsubackPacket) String() string { + str := fmt.Sprintf("%s\n", ua.FixedHeader) + str += fmt.Sprintf("MessageID: %d", ua.MessageID) + return str +} + +func (ua *UnsubackPacket) Write(w io.Writer) error { + var err error + ua.FixedHeader.RemainingLength = 2 + packet := ua.FixedHeader.pack() + packet.Write(encodeUint16(ua.MessageID)) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (ua *UnsubackPacket) Unpack(b io.Reader) { + ua.MessageID = decodeUint16(b) +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (ua *UnsubackPacket) Details() Details { + return Details{Qos: 0, MessageID: ua.MessageID} +} + +//UUID returns the unique ID assigned to the ControlPacket when +//it was originally received. Note: this is not related to the +//MessageID field for MQTT packets +func (ua *UnsubackPacket) UUID() uuid.UUID { + return ua.uuid +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/unsubscribe.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/unsubscribe.go new file mode 100644 index 000000000..d7ee03cfc --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets/unsubscribe.go @@ -0,0 +1,61 @@ +package packets + +import ( + "bytes" + "fmt" + "github.com/pborman/uuid" + "io" +) + +//UnsubscribePacket is an internal representation of the fields of the +//Unsubscribe MQTT packet +type UnsubscribePacket struct { + FixedHeader + MessageID uint16 + Topics []string + uuid uuid.UUID +} + +func (u *UnsubscribePacket) String() string { + str := fmt.Sprintf("%s\n", u.FixedHeader) + str += fmt.Sprintf("MessageID: %d", u.MessageID) + return str +} + +func (u *UnsubscribePacket) Write(w io.Writer) error { + var body bytes.Buffer + var err error + body.Write(encodeUint16(u.MessageID)) + for _, topic := range u.Topics { + body.Write(encodeString(topic)) + } + u.FixedHeader.RemainingLength = body.Len() + packet := u.FixedHeader.pack() + packet.Write(body.Bytes()) + _, err = packet.WriteTo(w) + + return err +} + +//Unpack decodes the details of a ControlPacket after the fixed +//header has been read +func (u *UnsubscribePacket) Unpack(b io.Reader) { + u.MessageID = decodeUint16(b) + var topic string + for topic = decodeString(b); topic != ""; topic = decodeString(b) { + u.Topics = append(u.Topics, topic) + } +} + +//Details returns a Details struct containing the Qos and +//MessageID of this ControlPacket +func (u *UnsubscribePacket) Details() Details { + return Details{Qos: 1, MessageID: u.MessageID} +} + +//UUID returns 
the unique ID assigned to the ControlPacket when +//it was originally received. Note: this is not related to the +//MessageID field for MQTT packets +func (u *UnsubscribePacket) UUID() uuid.UUID { + return u.uuid +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/ping.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/ping.go new file mode 100644 index 000000000..1ccd1ec89 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/ping.go @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "errors" + "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" + "sync" + "time" +) + +type lastcontact struct { + sync.Mutex + lasttime time.Time +} + +func (l *lastcontact) update() { + l.Lock() + defer l.Unlock() + l.lasttime = time.Now() + +} + +func (l *lastcontact) get() time.Time { + l.Lock() + defer l.Unlock() + return l.lasttime +} + +func keepalive(c *Client) { + DEBUG.Println(PNG, "keepalive starting") + c.pingOutstanding = false + + for { + select { + case <-c.stop: + DEBUG.Println(PNG, "keepalive stopped") + c.workers.Done() + return + default: + last := uint(time.Since(c.lastContact.get()).Seconds()) + //DEBUG.Printf("%s last contact: %d (timeout: %d)", PNG, last, uint(c.options.KeepAlive.Seconds())) + if last > uint(c.options.KeepAlive.Seconds()) { + if !c.pingOutstanding { + DEBUG.Println(PNG, "keepalive sending ping") + ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket) + //We don't want to wait behind large messages being sent, the Write call + //will block until it it able to send the packet. + ping.Write(c.conn) + c.pingOutstanding = true + } else { + CRITICAL.Println(PNG, "pingresp not received, disconnecting") + c.workers.Done() + c.internalConnLost(errors.New("pingresp not received, disconnecting")) + return + } + } + time.Sleep(1 * time.Second) + } + } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/router.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/router.go new file mode 100644 index 000000000..8e8172cb5 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/router.go @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "container/list" + "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" + "strings" + "sync" +) + +// route is a type which associates MQTT Topic strings with a +// callback to be executed upon the arrival of a message associated +// with a subscription to that topic. 
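// Editorial sketch, not vendored code: a route pairs a subscription filter
// with a MessageHandler, which is what the client's Subscribe call wires into
// addRoute below. A typical handler, matching the shape used by the samples
// in this patch (topic string is a placeholder):
//
//	var h mqtt.MessageHandler = func(c *mqtt.Client, m mqtt.Message) {
//		fmt.Printf("TOPIC: %s MSG: %s\n", m.Topic(), m.Payload())
//	}
//	client.Subscribe("go-mqtt/sample", 0, h)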
+type route struct { + topic string + callback MessageHandler +} + +// match takes a slice of strings which represent the route being tested having been split on '/' +// separators, and a slice of strings representing the topic string in the published message, similarly +// split. +// The function determines if the topic string matches the route according to the MQTT topic rules +// and returns a boolean of the outcome +func match(route []string, topic []string) bool { + if len(route) == 0 { + if len(topic) == 0 { + return true + } + return false + } + + if len(topic) == 0 { + if route[0] == "#" { + return true + } + return false + } + + if route[0] == "#" { + return true + } + + if (route[0] == "+") || (route[0] == topic[0]) { + return match(route[1:], topic[1:]) + } + + return false +} + +func routeIncludesTopic(route, topic string) bool { + return match(strings.Split(route, "/"), strings.Split(topic, "/")) +} + +// match takes the topic string of the published message and does a basic compare to the +// string of the current Route, if they match it returns true +func (r *route) match(topic string) bool { + return r.topic == topic || routeIncludesTopic(r.topic, topic) +} + +type router struct { + sync.RWMutex + routes *list.List + defaultHandler MessageHandler + messages chan *packets.PublishPacket + stop chan bool +} + +// newRouter returns a new instance of a Router and channel which can be used to tell the Router +// to stop +func newRouter() (*router, chan bool) { + router := &router{routes: list.New(), messages: make(chan *packets.PublishPacket), stop: make(chan bool)} + stop := router.stop + return router, stop +} + +// addRoute takes a topic string and MessageHandler callback. It looks in the current list of +// routes to see if there is already a matching Route. If there is it replaces the current +// callback with the new one. If not it add a new entry to the list of Routes. +func (r *router) addRoute(topic string, callback MessageHandler) { + r.Lock() + defer r.Unlock() + for e := r.routes.Front(); e != nil; e = e.Next() { + if e.Value.(*route).match(topic) { + r := e.Value.(*route) + r.callback = callback + return + } + } + r.routes.PushBack(&route{topic: topic, callback: callback}) +} + +// deleteRoute takes a route string, looks for a matching Route in the list of Routes. If +// found it removes the Route from the list. +func (r *router) deleteRoute(topic string) { + r.Lock() + defer r.Unlock() + for e := r.routes.Front(); e != nil; e = e.Next() { + if e.Value.(*route).match(topic) { + r.routes.Remove(e) + return + } + } +} + +// setDefaultHandler assigns a default callback that will be called if no matching Route +// is found for an incoming Publish. +func (r *router) setDefaultHandler(handler MessageHandler) { + r.defaultHandler = handler +} + +// matchAndDispatch takes a channel of Message pointers as input and starts a go routine that +// takes messages off the channel, matches them against the internal route list and calls the +// associated callback (or the defaultHandler, if one exists and no other route matched). If +// anything is sent down the stop channel the function will end. 
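// Editorial examples, not vendored code, of the wildcard rules match above
// encodes: '+' matches exactly one topic level, '#' matches any remainder.
//
//	routeIncludesTopic("a/+/c", "a/b/c") // true
//	routeIncludesTopic("a/#", "a/b/c")   // true
//	routeIncludesTopic("a/+", "a/b/c")   // false: '+' is single-level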
+func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order bool, client *Client) { + go func() { + for { + select { + case message := <-messages: + sent := false + r.RLock() + for e := r.routes.Front(); e != nil; e = e.Next() { + if e.Value.(*route).match(message.TopicName) { + if order { + r.RUnlock() + e.Value.(*route).callback(client, messageFromPublish(message)) + r.RLock() + } else { + go e.Value.(*route).callback(client, messageFromPublish(message)) + } + sent = true + } + } + r.RUnlock() + if !sent && r.defaultHandler != nil { + if order { + r.RLock() + r.defaultHandler(client, messageFromPublish(message)) + r.RUnlock() + } else { + go r.defaultHandler(client, messageFromPublish(message)) + } + } + case <-r.stop: + return + } + } + }() +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/bug-ping.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/bug-ping.go new file mode 100644 index 000000000..4e1f3065b --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/bug-ping.go @@ -0,0 +1,18 @@ +package main + +import ( + "time" + + MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" +) + +func main() { + opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883") + c := MQTT.NewClient(opts) + if token := c.Connect(); token.Wait() && token.Error() != nil { + panic(token.Error()) + } + for { + time.Sleep(1 * time.Second) + } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/build.sh b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/build.sh new file mode 100644 index 000000000..11c5a6bbd --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/build.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +go clean + +for file in *.go +do + echo -n "Compiling $file ..." + go build "$file" + echo " done." +done diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/close_bug.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/close_bug.go new file mode 100644 index 000000000..8fa0035df --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/close_bug.go @@ -0,0 +1,23 @@ +package main + +import ( + "fmt" + "time" + + MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" +) + +func main() { + opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883") + opts.SetCleanSession(true) + + c := MQTT.NewClient(opts) + if token := c.Connect(); token.Wait() && token.Error() != nil { + panic(token.Error()) + } + fmt.Println("plz mosquitto goes down now") + time.Sleep(5 * time.Second) + + c.Disconnect(200) + time.Sleep(5 * time.Second) +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/custom_store.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/custom_store.go new file mode 100644 index 000000000..19c2a28b6 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/custom_store.go @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+// This demonstrates how to implement your own Store interface and provide
+// it to the go-mqtt client.
+
+package main
+
+import (
+	"fmt"
+	"time"
+
+	MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
+	"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
+)
+
+// This NoOpStore type implements the go-mqtt/Store interface, which
+// allows it to be used by the go-mqtt client library. However, it is
+// highly recommended that you do not use this NoOpStore in production,
+// because it will NOT provide any sort of guarantee of message delivery.
+type NoOpStore struct {
+	// Contain nothing
+}
+
+func (store *NoOpStore) Open() {
+	// Do nothing
+}
+
+func (store *NoOpStore) Put(string, packets.ControlPacket) {
+	// Do nothing
+}
+
+func (store *NoOpStore) Get(string) packets.ControlPacket {
+	// Do nothing
+	return nil
+}
+
+func (store *NoOpStore) Del(string) {
+	// Do nothing
+}
+
+func (store *NoOpStore) All() []string {
+	return nil
+}
+
+func (store *NoOpStore) Close() {
+	// Do Nothing
+}
+
+func (store *NoOpStore) Reset() {
+	// Do Nothing
+}
+
+func main() {
+	myNoOpStore := &NoOpStore{}
+
+	opts := MQTT.NewClientOptions()
+	opts.AddBroker("tcp://iot.eclipse.org:1883")
+	opts.SetClientID("custom-store")
+	opts.SetStore(myNoOpStore)
+
+	var callback MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
+		fmt.Printf("TOPIC: %s\n", msg.Topic())
+		fmt.Printf("MSG: %s\n", msg.Payload())
+	}
+
+	c := MQTT.NewClient(opts)
+	if token := c.Connect(); token.Wait() && token.Error() != nil {
+		panic(token.Error())
+	}
+
+	c.Subscribe("/go-mqtt/sample", 0, callback)
+
+	for i := 0; i < 5; i++ {
+		text := fmt.Sprintf("this is msg #%d!", i)
+		token := c.Publish("/go-mqtt/sample", 0, false, text)
+		token.Wait()
+	}
+
+	for i := 1; i < 5; i++ {
+		time.Sleep(1 * time.Second)
+	}
+
+	c.Disconnect(250)
+}
diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/mosquitto.conf b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/mosquitto.conf
new file mode 100644
index 000000000..4c54bc201
--- /dev/null
+++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/mosquitto.conf
@@ -0,0 +1,745 @@
+# Config file for mosquitto
+#
+# See mosquitto.conf(5) for more information.
+#
+# Default values are shown, uncomment to change.
+#
+# Use the # character to indicate a comment, but only if it is the
+# very first character on the line.
+
+# =================================================================
+# General configuration
+# =================================================================
+
+# Time in seconds to wait before resending an outgoing QoS=1 or
+# QoS=2 message.
+#retry_interval 20
+
+# Time in seconds between updates of the $SYS tree.
+# Set to 0 to disable the publishing of the $SYS tree.
+#sys_interval 10
+
+# Time in seconds between cleaning the internal message store of
+# unreferenced messages. Lower values will result in lower memory
+# usage but more processor time, higher values will have the
+# opposite effect.
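+# For example, "store_clean_interval 60" would clean the store only once a
+# minute, trading slightly higher memory use for less processor time.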
+# Setting a value of 0 means the unreferenced messages will be +# disposed of as quickly as possible. +#store_clean_interval 10 + +# Write process id to a file. Default is a blank string which means +# a pid file shouldn't be written. +# This should be set to /var/run/mosquitto.pid if mosquitto is +# being run automatically on boot with an init script and +# start-stop-daemon or similar. +#pid_file + +# When run as root, drop privileges to this user and its primary +# group. +# Leave blank to stay as root, but this is not recommended. +# If run as a non-root user, this setting has no effect. +# Note that on Windows this has no effect and so mosquitto should +# be started by the user you wish it to run as. +#user mosquitto + +# The maximum number of QoS 1 and 2 messages currently inflight per +# client. +# This includes messages that are partway through handshakes and +# those that are being retried. Defaults to 20. Set to 0 for no +# maximum. Setting to 1 will guarantee in-order delivery of QoS 1 +# and 2 messages. +#max_inflight_messages 20 + +# The maximum number of QoS 1 and 2 messages to hold in a queue +# above those that are currently in-flight. Defaults to 100. Set +# to 0 for no maximum (not recommended). +# See also queue_qos0_messages. +#max_queued_messages 100 + +# Set to true to queue messages with QoS 0 when a persistent client is +# disconnected. These messages are included in the limit imposed by +# max_queued_messages. +# Defaults to false. +# This is a non-standard option for the MQTT v3.1 spec but is allowed in +# v3.1.1. +#queue_qos0_messages false + +# This option sets the maximum publish payload size that the broker will allow. +# Received messages that exceed this size will not be accepted by the broker. +# The default value is 0, which means that all valid MQTT messages are +# accepted. MQTT imposes a maximum payload size of 268435455 bytes. +#message_size_limit 0 + +# This option controls whether a client is allowed to connect with a zero +# length client id or not. This option only affects clients using MQTT v3.1.1 +# and later. If set to false, clients connecting with a zero length client id +# are disconnected. If set to true, clients will be allocated a client id by +# the broker. This means it is only useful for clients with clean session set +# to true. +#allow_zero_length_clientid true + +# If allow_zero_length_clientid is true, this option allows you to set a prefix +# to automatically generated client ids to aid visibility in logs. +#auto_id_prefix + +# This option allows persistent clients (those with clean session set to false) +# to be removed if they do not reconnect within a certain time frame. +# +# This is a non-standard option in MQTT V3.1 but allowed in MQTT v3.1.1. +# +# Badly designed clients may set clean session to false whilst using a randomly +# generated client id. This leads to persistent clients that will never +# reconnect. This option allows these clients to be removed. +# +# The expiration period should be an integer followed by one of d w m y for +# day, week, month and year respectively. For example +# +# persistent_client_expiration 2m +# persistent_client_expiration 14d +# persistent_client_expiration 1y +# +# The default if not set is to never expire persistent clients. +#persistent_client_expiration + +# If a client is subscribed to multiple subscriptions that overlap, e.g. 
foo/#
+# and foo/+/baz, then MQTT expects that when the broker receives a message on
+# a topic that matches both subscriptions, such as foo/bar/baz, then the client
+# should only receive the message once.
+# Mosquitto keeps track of which clients a message has been sent to in order to
+# meet this requirement. The allow_duplicate_messages option allows this
+# behaviour to be disabled, which may be useful if you have a large number of
+# clients subscribed to the same set of topics and are very concerned about
+# minimising memory usage.
+# It can be safely set to true if you know in advance that your clients will
+# never have overlapping subscriptions, otherwise your clients must be able to
+# correctly deal with duplicate messages even when they have QoS=2.
+#allow_duplicate_messages false
+
+# The MQTT specification requires that the QoS of a message delivered to a
+# subscriber is never upgraded to match the QoS of the subscription. Enabling
+# this option changes this behaviour. If upgrade_outgoing_qos is set true,
+# messages sent to a subscriber will always match the QoS of its subscription.
+# This is a non-standard option explicitly disallowed by the spec.
+#upgrade_outgoing_qos false
+
+# =================================================================
+# Default listener
+# =================================================================
+
+# IP address/hostname to bind the default listener to. If not
+# given, the default listener will not be bound to a specific
+# address and so will be accessible to all network interfaces.
+# bind_address ip-address/host name
+#bind_address
+
+# Port to use for the default listener.
+#port 1883
+
+# The maximum number of client connections to allow. This is
+# a per listener setting.
+# Default is -1, which means unlimited connections.
+# Note that other process limits mean that unlimited connections
+# are not really possible. Typically the default maximum number of
+# connections possible is around 1024.
+#max_connections -1
+
+# -----------------------------------------------------------------
+# Certificate based SSL/TLS support
+# -----------------------------------------------------------------
+# The following options can be used to enable SSL/TLS support for
+# this listener. Note that the recommended port for MQTT over TLS
+# is 8883, but this must be set manually.
+#
+# See also the mosquitto-tls man page.
+
+# At least one of cafile or capath must be defined. They both
+# define methods of accessing the PEM encoded Certificate
+# Authority certificates that have signed your server certificate
+# and that you wish to trust.
+# cafile defines the path to a file containing the CA certificates.
+# capath defines a directory that will be searched for files
+# containing the CA certificates. For capath to work correctly, the
+# certificate files must have ".crt" as the file ending and you must run
+# "c_rehash <path to capath>" each time you add/remove a certificate.
+#cafile
+#capath
+
+# Path to the PEM encoded server certificate.
+#certfile
+
+# Path to the PEM encoded keyfile.
+#keyfile
+
+# This option defines the version of the TLS protocol to use for this listener.
+# The default value will always be the highest version that is available for
+# the version of openssl that the broker was compiled against. For openssl >=
+# 1.0.1 the valid values are tlsv1.2 tlsv1.1 and tlsv1. For openssl < 1.0.1 the
+# valid values are tlsv1.
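+# For example, uncommenting the option below as "tls_version tlsv1.2" would
+# restrict this listener to TLS v1.2 connections only.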
+#tls_version
+
+# By default a TLS enabled listener will operate in a similar fashion to a
+# https enabled web server, in that the server has a certificate signed by a CA
+# and the client will verify that it is a trusted certificate. The overall aim
+# is encryption of the network traffic. By setting require_certificate to true,
+# the client must provide a valid certificate in order for the network
+# connection to proceed. This allows access to the broker to be controlled
+# outside of the mechanisms provided by MQTT.
+#require_certificate false
+
+# If require_certificate is true, you may set use_identity_as_username to true
+# to use the CN value from the client certificate as a username. If this is
+# true, the password_file option will not be used for this listener.
+#use_identity_as_username false
+
+# If you have require_certificate set to true, you can create a certificate
+# revocation list file to revoke access to particular client certificates. If
+# you have done this, use crlfile to point to the PEM encoded revocation file.
+#crlfile
+
+# If you wish to control which encryption ciphers are used, use the ciphers
+# option. The list of available ciphers can be obtained using the "openssl
+# ciphers" command and should be provided in the same format as the output of
+# that command.
+# If unset defaults to DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2:@STRENGTH
+#ciphers DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2:@STRENGTH
+
+# -----------------------------------------------------------------
+# Pre-shared-key based SSL/TLS support
+# -----------------------------------------------------------------
+# The following options can be used to enable PSK based SSL/TLS support for
+# this listener. Note that the recommended port for MQTT over TLS is 8883, but
+# this must be set manually.
+#
+# See also the mosquitto-tls man page and the "Certificate based SSL/TLS
+# support" section. Only one of certificate or PSK encryption support can be
+# enabled for any listener.
+
+# The psk_hint option enables pre-shared-key support for this listener and also
+# acts as an identifier for this listener. The hint is sent to clients and may
+# be used locally to aid authentication. The hint is a free form string that
+# doesn't have much meaning in itself, so feel free to be creative.
+# If this option is provided, see psk_file to define the pre-shared keys to be
+# used or create a security plugin to handle them.
+#psk_hint
+
+# Set use_identity_as_username to have the psk identity sent by the client used
+# as its username. Authentication will be carried out using the PSK rather than
+# the MQTT username/password and so password_file will not be used for this
+# listener.
+#use_identity_as_username false
+
+# When using PSK, the encryption ciphers used will be chosen from the list of
+# available PSK ciphers. If you want to control which ciphers are available,
+# use the "ciphers" option. The list of available ciphers can be obtained
+# using the "openssl ciphers" command and should be provided in the same format
+# as the output of that command.
+#ciphers
+
+# =================================================================
+# Extra listeners
+# =================================================================
+
+# Listen on a port/ip address combination. By using this variable
+# multiple times, mosquitto can listen on more than one port. If
+# this variable is used and neither bind_address nor port given,
+# then the default listener will not be started.
+# The port number to listen on must be given. Optionally, an ip
+# address or host name may be supplied as a second argument. In
+# this case, mosquitto will attempt to bind the listener to that
+# address and so restrict access to the associated network and
+# interface. By default, mosquitto will listen on all interfaces.
+# listener port-number [ip address/host name]
+#listener
+
+# The maximum number of client connections to allow. This is
+# a per listener setting.
+# Default is -1, which means unlimited connections.
+# Note that other process limits mean that unlimited connections
+# are not really possible. Typically the default maximum number of
+# connections possible is around 1024.
+#max_connections -1
+
+# The listener can be restricted to operating within a topic hierarchy using
+# the mount_point option. This is achieved by prefixing the mount_point string
+# to all topics for any clients connected to this listener. This prefixing only
+# happens internally to the broker; the client will not see the prefix.
+#mount_point
+
+# -----------------------------------------------------------------
+# Certificate based SSL/TLS support
+# -----------------------------------------------------------------
+# The following options can be used to enable certificate based SSL/TLS support
+# for this listener. Note that the recommended port for MQTT over TLS is 8883,
+# but this must be set manually.
+#
+# See also the mosquitto-tls man page and the "Pre-shared-key based SSL/TLS
+# support" section. Only one of certificate or PSK encryption support can be
+# enabled for any listener.
+
+# At least one of cafile or capath must be defined to enable certificate based
+# TLS encryption. They both define methods of accessing the PEM encoded
+# Certificate Authority certificates that have signed your server certificate
+# and that you wish to trust.
+# cafile defines the path to a file containing the CA certificates.
+# capath defines a directory that will be searched for files
+# containing the CA certificates. For capath to work correctly, the
+# certificate files must have ".crt" as the file ending and you must run
+# "c_rehash <path to capath>" each time you add/remove a certificate.
+#cafile
+#capath
+
+# Path to the PEM encoded server certificate.
+#certfile
+
+# Path to the PEM encoded keyfile.
+#keyfile
+
+# By default a TLS enabled listener will operate in a similar fashion to a
+# https enabled web server, in that the server has a certificate signed by a CA
+# and the client will verify that it is a trusted certificate. The overall aim
+# is encryption of the network traffic. By setting require_certificate to true,
+# the client must provide a valid certificate in order for the network
+# connection to proceed. This allows access to the broker to be controlled
+# outside of the mechanisms provided by MQTT.
+#require_certificate false
+
+# If require_certificate is true, you may set use_identity_as_username to true
+# to use the CN value from the client certificate as a username. If this is
+# true, the password_file option will not be used for this listener.
+#use_identity_as_username false
+
+# If you have require_certificate set to true, you can create a certificate
+# revocation list file to revoke access to particular client certificates. If
+# you have done this, use crlfile to point to the PEM encoded revocation file.
+#crlfile
+
+# If you wish to control which encryption ciphers are used, use the ciphers
+# option. The list of available ciphers can be obtained using the "openssl
+# ciphers" command and should be provided in the same format as the output of
+# that command.
+#ciphers
+
+# -----------------------------------------------------------------
+# Pre-shared-key based SSL/TLS support
+# -----------------------------------------------------------------
+# The following options can be used to enable PSK based SSL/TLS support for
+# this listener. Note that the recommended port for MQTT over TLS is 8883, but
+# this must be set manually.
+#
+# See also the mosquitto-tls man page and the "Certificate based SSL/TLS
+# support" section. Only one of certificate or PSK encryption support can be
+# enabled for any listener.
+
+# The psk_hint option enables pre-shared-key support for this listener and also
+# acts as an identifier for this listener. The hint is sent to clients and may
+# be used locally to aid authentication. The hint is a free form string that
+# doesn't have much meaning in itself, so feel free to be creative.
+# If this option is provided, see psk_file to define the pre-shared keys to be
+# used or create a security plugin to handle them.
+#psk_hint
+
+# Set use_identity_as_username to have the psk identity sent by the client used
+# as its username. Authentication will be carried out using the PSK rather than
+# the MQTT username/password and so password_file will not be used for this
+# listener.
+#use_identity_as_username false
+
+# When using PSK, the encryption ciphers used will be chosen from the list of
+# available PSK ciphers. If you want to control which ciphers are available,
+# use the "ciphers" option. The list of available ciphers can be obtained
+# using the "openssl ciphers" command and should be provided in the same format
+# as the output of that command.
+#ciphers
+
+# =================================================================
+# Persistence
+# =================================================================
+
+# If persistence is enabled, save the in-memory database to disk
+# every autosave_interval seconds. If set to 0, the persistence
+# database will only be written when mosquitto exits. See also
+# autosave_on_changes.
+# Note that writing of the persistence database can be forced by
+# sending mosquitto a SIGUSR1 signal.
+#autosave_interval 1800
+
+# If true, mosquitto will count the number of subscription changes, retained
+# messages received and queued messages and if the total exceeds
+# autosave_interval then the in-memory database will be saved to disk.
+# If false, mosquitto will save the in-memory database to disk by treating
+# autosave_interval as a time in seconds.
+#autosave_on_changes false
+
+# Save persistent message data to disk (true/false).
+# This saves information about all messages, including
+# subscriptions, currently in-flight messages and retained
+# messages.
+# retained_persistence is a synonym for this option.
+#persistence false
+
+# The filename to use for the persistent database, not including
+# the path.
+#persistence_file mosquitto.db
+
+# Location for persistent database. Must include trailing /
+# Default is an empty string (current directory).
+# Set to e.g. /var/lib/mosquitto/ if running as a proper service on Linux or
+# similar.
+#persistence_location
+
+# =================================================================
+# Logging
+# =================================================================
+
+# Places to log to. Use multiple log_dest lines for multiple
+# logging destinations.
+# Possible destinations are: stdout stderr syslog topic file
+#
+# stdout and stderr log to the console on the named output.
+#
+# syslog uses the userspace syslog facility which usually ends up
+# in /var/log/messages or similar.
+#
+# topic logs to the broker topic '$SYS/broker/log/<severity>',
+# where severity is one of D, E, W, N, I, M which are debug, error,
+# warning, notice, information and message. Message type severity is used by
+# the subscribe/unsubscribe log_types and publishes log messages to
+# $SYS/broker/log/M/subscribe or $SYS/broker/log/M/unsubscribe.
+#
+# The file destination requires an additional parameter which is the file to be
+# logged to, e.g. "log_dest file /var/log/mosquitto.log". The file will be
+# closed and reopened when the broker receives a HUP signal. Only a single file
+# destination may be configured.
+#
+# Note that if the broker is running as a Windows service it will default to
+# "log_dest none" and neither stdout nor stderr logging is available.
+# Use "log_dest none" if you wish to disable logging.
+log_dest stdout
+
+# Types of messages to log. Use multiple log_type lines for logging
+# multiple types of messages.
+# Possible types are: debug, error, warning, notice, information,
+# none, subscribe, unsubscribe, all.
+# Note that debug type messages are for decoding the incoming/outgoing
+# network packets. They are not logged in "topics".
+#log_type error
+#log_type warning
+#log_type notice
+log_type information
+
+# If set to true, client connection and disconnection messages will be included
+# in the log.
+#connection_messages true
+
+# If set to true, add a timestamp value to each log message.
+#log_timestamp true
+
+# =================================================================
+# Security
+# =================================================================
+
+# If set, only clients that have a matching prefix on their
+# clientid will be allowed to connect to the broker. By default,
+# all clients may connect.
+# For example, setting "secure-" here would mean a client "secure-
+# client" could connect but another with clientid "mqtt" couldn't.
+#clientid_prefixes
+
+# Boolean value that determines whether clients that connect
+# without providing a username are allowed to connect. If set to
+# false then a password file should be created (see the
+# password_file option) to control authenticated client access.
+# Defaults to true.
+#allow_anonymous true
+
+# In addition to the clientid_prefixes, allow_anonymous and TLS
+# authentication options, username based authentication is also
+# possible. The default support is described in "Default
+# authentication and topic access control" below. The auth_plugin
+# allows another authentication method to be used.
+# Specify the path to the loadable plugin and see the
+# "Authentication and topic access plugin options" section below.
+#auth_plugin
+
+# -----------------------------------------------------------------
+# Default authentication and topic access control
+# -----------------------------------------------------------------
+
+# Control access to the broker using a password file. This file can be
+# generated using the mosquitto_passwd utility. If TLS support is not compiled
+# into mosquitto (it is recommended that TLS support should be included) then
+# plain text passwords are used, in which case the file should be a text file
+# with lines in the format:
+# username:password
+# The password (and colon) may be omitted if desired, although this
+# offers very little in the way of security.
+#
+# See the TLS client require_certificate and use_identity_as_username options
+# for alternative authentication options.
+password_file pwfile.example
+
+# Access may also be controlled using a pre-shared-key file. This requires
+# TLS-PSK support and a listener configured to use it. The file should be text
+# lines in the format:
+# identity:key
+# The key should be in hexadecimal format without a leading "0x".
+#psk_file
+
+# Control access to topics on the broker using an access control list
+# file. If this parameter is defined then only the topics listed will
+# have access.
+# If the first character of a line of the ACL file is a # it is treated as a
+# comment.
+# Topic access is added with lines of the format:
+#
+# topic [read|write] <topic>
+#
+# The access type is controlled using "read" or "write". This parameter
+# is optional - if not given then the access is read/write.
+# <topic> can contain the + or # wildcards as in subscriptions.
+#
+# The first set of topics are applied to anonymous clients, assuming
+# allow_anonymous is true. User specific topic ACLs are added after a
+# user line as follows:
+#
+# user <username>
+#
+# The username referred to here is the same as in password_file. It is
+# not the clientid.
+#
+#
+# It is also possible to define ACLs based on pattern substitution within the
+# topic. The patterns available for substitution are:
+#
+# %c to match the client id of the client
+# %u to match the username of the client
+#
+# The substitution pattern must be the only text for that level of hierarchy.
+#
+# The form is the same as for the topic keyword, but using pattern as the
+# keyword.
+# Pattern ACLs apply to all users even if the "user" keyword has previously
+# been given.
+#
+# If using bridges with usernames and ACLs, connection messages can be allowed
+# with the following pattern:
+# pattern write $SYS/broker/connection/%c/state
+#
+# pattern [read|write] <topic>
+#
+# Example:
+#
+# pattern write sensor/%u/data
+#
+#acl_file
+
+# -----------------------------------------------------------------
+# Authentication and topic access plugin options
+# -----------------------------------------------------------------
+
+# If the auth_plugin option above is used, define options to pass to the
+# plugin here as described by the plugin instructions. All options named
+# using the format auth_opt_* will be passed to the plugin, for example:
+#
+# auth_opt_db_host
+# auth_opt_db_port
+# auth_opt_db_username
+# auth_opt_db_password
+
+
+# =================================================================
+# Bridges
+# =================================================================
+
+# A bridge is a way of connecting multiple MQTT brokers together.
+# Create a new bridge using the "connection" option as described below. Set
+# options for the bridges using the remaining parameters. You must specify the
+# address and at least one topic to subscribe to.
+# Each connection must have a unique name.
+# The address line may have multiple host address and ports specified. See
+# below in the round_robin description for more details on bridge behaviour if
+# multiple addresses are used.
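+# As an illustrative sketch (the connection name, address and topic here are
+# placeholders only), a minimal bridge definition could look like:
+#
+# connection bridge-01
+# address 192.168.1.2:1883
+# topic sensors/# both 0
+#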
+# The direction that the topic will be shared can be chosen by
+# specifying out, in or both, where the default value is out.
+# The QoS level of the bridged communication can be specified with the next
+# topic option. The default QoS level is 0; to change the QoS the topic
+# direction must also be given.
+# The local and remote prefix options allow a topic to be remapped when it is
+# bridged to/from the remote broker. This provides the ability to place a topic
+# tree in an appropriate location.
+# For more details see the mosquitto.conf man page.
+# Multiple topics can be specified per connection, but be careful
+# not to create any loops.
+# If you are using bridges with cleansession set to false (the default), then
+# you may get unexpected behaviour from incoming topics if you change what
+# topics you are subscribing to. This is because the remote broker keeps the
+# subscription for the old topic. If you have this problem, connect your bridge
+# with cleansession set to true, then reconnect with cleansession set to false
+# as normal.
+#connection <name>
+#address <host>[:<port>] [<host>[:<port>]]
+#topic <topic> [[[out | in | both] qos-level] local-prefix remote-prefix]
+
+# If the bridge has more than one address given in the address/addresses
+# configuration, the round_robin option defines the behaviour of the bridge on
+# a failure of the bridge connection. If round_robin is false, the default
+# value, then the first address is treated as the main bridge connection. If
+# the connection fails, the other secondary addresses will be attempted in
+# turn. Whilst connected to a secondary bridge, the bridge will periodically
+# attempt to reconnect to the main bridge until successful.
+# If round_robin is true, then all addresses are treated as equals. If a
+# connection fails, the next address will be tried and if successful will
+# remain connected until it fails.
+#round_robin false
+
+# Set the client id for this bridge connection. If not defined,
+# this defaults to 'name.hostname' where name is the connection
+# name and hostname is the hostname of this computer.
+#clientid
+
+# Set the clean session variable for this bridge.
+# When set to true, when the bridge disconnects for any reason, all
+# messages and subscriptions will be cleaned up on the remote
+# broker. Note that with cleansession set to true, there may be a
+# significant amount of retained messages sent when the bridge
+# reconnects after losing its connection.
+# When set to false, the subscriptions and messages are kept on the
+# remote broker, and delivered when the bridge reconnects.
+#cleansession false
+
+# If set to true, publish notification messages to the local and remote brokers
+# giving information about the state of the bridge connection. Retained
+# messages are published to the topic $SYS/broker/connection/<name>/state
+# unless the notification_topic option is used.
+# If the message is 1 then the connection is active, or 0 if the connection has
+# failed.
+#notifications true
+
+# Choose the topic on which notification messages for this bridge are
+# published. If not set, messages are published on the topic
+# $SYS/broker/connection/<name>/state
+#notification_topic
+
+# Set the keepalive interval for this bridge connection, in
+# seconds.
+#keepalive_interval 60
+
+# Set the start type of the bridge. This controls how the bridge starts and
+# can be one of three types: automatic, lazy and once. Note that RSMB provides
+# a fourth start type "manual" which isn't currently supported by mosquitto.
+#
+# "automatic" is the default start type and means that the bridge connection
+# will be started automatically when the broker starts and also restarted
+# after a short delay (30 seconds) if the connection fails.
+#
+# Bridges using the "lazy" start type will be started automatically when the
+# number of queued messages exceeds the number set with the "threshold"
+# parameter. It will be stopped automatically after the time set by the
+# "idle_timeout" parameter. Use this start type if you wish the connection to
+# only be active when it is needed.
+#
+# A bridge using the "once" start type will be started automatically when the
+# broker starts but will not be restarted if the connection fails.
+#start_type automatic
+
+# Set the amount of time a bridge using the automatic start type will wait
+# until attempting to reconnect. Defaults to 30 seconds.
+#restart_timeout 30
+
+# Set the amount of time a bridge using the lazy start type must be idle before
+# it will be stopped. Defaults to 60 seconds.
+#idle_timeout 60
+
+# Set the number of messages that need to be queued for a bridge with lazy
+# start type to be restarted. Defaults to 10 messages.
+# Must be less than max_queued_messages.
+#threshold 10
+
+# If try_private is set to true, the bridge will attempt to indicate to the
+# remote broker that it is a bridge, not an ordinary client. If successful, this
+# means that loop detection will be more effective and that retained messages
+# will be propagated correctly. Not all brokers support this feature so it may
+# be necessary to set try_private to false if your bridge does not connect
+# properly.
+#try_private true
+
+# Set the username to use when connecting to an MQTT v3.1 broker
+# that requires authentication.
+#username
+
+# Set the password to use when connecting to an MQTT v3.1 broker
+# that requires authentication. This option is only used if
+# username is also set.
+#password
+
+# -----------------------------------------------------------------
+# Certificate based SSL/TLS support
+# -----------------------------------------------------------------
+# Either bridge_cafile or bridge_capath must be defined to enable TLS support
+# for this bridge.
+# bridge_cafile defines the path to a file containing the
+# Certificate Authority certificates that have signed the remote broker
+# certificate.
+# bridge_capath defines a directory that will be searched for files containing
+# the CA certificates. For bridge_capath to work correctly, the certificate
+# files must have ".crt" as the file ending and you must run "c_rehash <path to
+# capath>" each time you add/remove a certificate.
+#bridge_cafile
+#bridge_capath
+
+# Path to the PEM encoded client certificate, if required by the remote broker.
+#bridge_certfile
+
+# Path to the PEM encoded client private key, if required by the remote broker.
+#bridge_keyfile
+
+# When using certificate based encryption, bridge_insecure disables
+# verification of the server hostname in the server certificate. This can be
+# useful when testing initial server configurations, but makes it possible for
+# a malicious third party to impersonate your server through DNS spoofing, for
+# example. Use this option in testing only. If you need to resort to using this
+# option in a production environment, your setup is at fault and there is no
+# point using encryption.
+#bridge_insecure false + +# ----------------------------------------------------------------- +# PSK based SSL/TLS support +# ----------------------------------------------------------------- +# Pre-shared-key encryption provides an alternative to certificate based +# encryption. A bridge can be configured to use PSK with the bridge_identity +# and bridge_psk options. These are the client PSK identity, and pre-shared-key +# in hexadecimal format with no "0x". Only one of certificate and PSK based +# encryption can be used on one +# bridge at once. +#bridge_identity +#bridge_psk + + +# ================================================================= +# External config files +# ================================================================= + +# External configuration files may be included by using the +# include_dir option. This defines a directory that will be searched +# for config files. All files that end in '.conf' will be loaded as +# a configuration file. It is best to have this as the last option +# in the main file. This option will only be processed from the main +# configuration file. The directory specified must not contain the +# main configuration file. +#include_dir + +# ================================================================= +# Unsupported rsmb options - for the future +# ================================================================= + +#addresses +#round_robin + +# ================================================================= +# rsmb options - unlikely to ever be supported +# ================================================================= + +#ffdc_output +#max_log_entries +#trace_level +#trace_output diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/pwfile.example b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/pwfile.example new file mode 100644 index 000000000..58b94c9aa --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/pwfile.example @@ -0,0 +1,3 @@ +roger:$6$clQ4Ocu312S0qWgl$Cv2wUxgEN73c6C6jlBkswqR4AkHsvDLWvtEXZZ8NpsBLgP1WAo/qA+WXcmEN/mjDNgdUwcxRAveqNMs2xUVQYA== +sub_client:$6$U+qg0/32F0g2Fh+n$fBPSkq/rfNyEQ/TkEjRgwGTTVBpvNhKSyGShovH9KHewsvJ731tD5Zx26IHhR5RYCICt0L9qBW0/KK31UkCliw== +pub_client:$6$vxQ89y+7WrsnL2yn$fSPMmEZn9TSrC8s/jaPmxJ9NijWpkP2e7bMJLz78JXR1vW2x8+T3FZ23byJA6xs5Mt+LeOybAHwcUv0OCl40rA== diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/routing.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/routing.go new file mode 100644 index 000000000..d95c6b59c --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/routing.go @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +/*---------------------------------------------------------------------- +This sample is designed to demonstrate the ability to set individual +callbacks on a per-subscription basis. 
There are three handlers in use: + brokerLoadHandler - $SYS/broker/load/# + brokerConnectionHandler - $SYS/broker/connection/# + brokerClientHandler - $SYS/broker/clients/# +The client will receive 100 messages total from those subscriptions, +and then print the total number of messages received from each. +It may take a few moments for the sample to complete running, as it +must wait for messages to be published. +-----------------------------------------------------------------------*/ + +package main + +import ( + "fmt" + "os" + + MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" +) + +var brokerLoad = make(chan bool) +var brokerConnection = make(chan bool) +var brokerClients = make(chan bool) + +func brokerLoadHandler(client *MQTT.Client, msg MQTT.Message) { + brokerLoad <- true + fmt.Printf("BrokerLoadHandler ") + fmt.Printf("[%s] ", msg.Topic()) + fmt.Printf("%s\n", msg.Payload()) +} + +func brokerConnectionHandler(client *MQTT.Client, msg MQTT.Message) { + brokerConnection <- true + fmt.Printf("BrokerConnectionHandler ") + fmt.Printf("[%s] ", msg.Topic()) + fmt.Printf("%s\n", msg.Payload()) +} + +func brokerClientsHandler(client *MQTT.Client, msg MQTT.Message) { + brokerClients <- true + fmt.Printf("BrokerClientsHandler ") + fmt.Printf("[%s] ", msg.Topic()) + fmt.Printf("%s\n", msg.Payload()) +} + +func main() { + opts := MQTT.NewClientOptions().AddBroker("tcp://iot.eclipse.org:1883").SetClientID("router-sample") + opts.SetCleanSession(true) + + c := MQTT.NewClient(opts) + if token := c.Connect(); token.Wait() && token.Error() != nil { + panic(token.Error()) + } + + if token := c.Subscribe("$SYS/broker/load/#", 0, brokerLoadHandler); token.Wait() && token.Error() != nil { + fmt.Println(token.Error()) + os.Exit(1) + } + + if token := c.Subscribe("$SYS/broker/connection/#", 0, brokerConnectionHandler); token.Wait() && token.Error() != nil { + fmt.Println(token.Error()) + os.Exit(1) + } + + if token := c.Subscribe("$SYS/broker/clients/#", 0, brokerClientsHandler); token.Wait() && token.Error() != nil { + fmt.Println(token.Error()) + os.Exit(1) + } + + loadCount := 0 + connectionCount := 0 + clientsCount := 0 + + for i := 0; i < 100; i++ { + select { + case <-brokerLoad: + loadCount++ + case <-brokerConnection: + connectionCount++ + case <-brokerClients: + clientsCount++ + } + } + + fmt.Printf("Received %3d Broker Load messages\n", loadCount) + fmt.Printf("Received %3d Broker Connection messages\n", connectionCount) + fmt.Printf("Received %3d Broker Clients messages\n", clientsCount) + + c.Disconnect(250) +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sample.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sample.go new file mode 100644 index 000000000..3f89f8af2 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sample.go @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials
+ * are made available under the terms of the Eclipse Public License v1.0
+ * which accompanies this distribution, and is available at
+ * http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Contributors:
+ *    Seth Hoenig
+ *    Allan Stockdill-Mander
+ *    Mike Robertson
+ */
+
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+
+	MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
+)
+
+/*
+Options:
+ [-help]                  Display help
+ [-a pub|sub]             Action pub (publish) or sub (subscribe)
+ [-m <message>]           Payload to send
+ [-n <number>]            Number of messages to send or receive
+ [-q 0|1|2]               Quality of Service
+ [-clean]                 CleanSession (true if -clean is present)
+ [-id <clientid>]         ClientID
+ [-user <user>]           User
+ [-password <password>]   Password
+ [-broker <uri>]          Broker URI
+ [-topic <topic>]         Topic
+ [-store <path>]          Store Directory
+
+*/
+
+func main() {
+	topic := flag.String("topic", "", "The topic name to/from which to publish/subscribe")
+	broker := flag.String("broker", "tcp://iot.eclipse.org:1883", "The broker URI. ex: tcp://10.10.1.1:1883")
+	password := flag.String("password", "", "The password (optional)")
+	user := flag.String("user", "", "The User (optional)")
+	id := flag.String("id", "testgoid", "The ClientID (optional)")
+	cleansess := flag.Bool("clean", false, "Set Clean Session (default false)")
+	qos := flag.Int("qos", 0, "The Quality of Service 0,1,2 (default 0)")
+	num := flag.Int("num", 1, "The number of messages to publish or subscribe (default 1)")
+	payload := flag.String("message", "", "The message text to publish (default empty)")
+	action := flag.String("action", "", "Action publish or subscribe (required)")
+	store := flag.String("store", ":memory:", "The Store Directory (default use memory store)")
+	flag.Parse()
+
+	if *action != "pub" && *action != "sub" {
+		fmt.Println("Invalid setting for -action, must be pub or sub")
+		return
+	}
+
+	if *topic == "" {
+		fmt.Println("Invalid setting for -topic, must not be empty")
+		return
+	}
+
+	fmt.Printf("Sample Info:\n")
+	fmt.Printf("\taction: %s\n", *action)
+	fmt.Printf("\tbroker: %s\n", *broker)
+	fmt.Printf("\tclientid: %s\n", *id)
+	fmt.Printf("\tuser: %s\n", *user)
+	fmt.Printf("\tpassword: %s\n", *password)
+	fmt.Printf("\ttopic: %s\n", *topic)
+	fmt.Printf("\tmessage: %s\n", *payload)
+	fmt.Printf("\tqos: %d\n", *qos)
+	fmt.Printf("\tcleansess: %v\n", *cleansess)
+	fmt.Printf("\tnum: %d\n", *num)
+	fmt.Printf("\tstore: %s\n", *store)
+
+	opts := MQTT.NewClientOptions()
+	opts.AddBroker(*broker)
+	opts.SetClientID(*id)
+	opts.SetUsername(*user)
+	opts.SetPassword(*password)
+	opts.SetCleanSession(*cleansess)
+	if *store != ":memory:" {
+		opts.SetStore(MQTT.NewFileStore(*store))
+	}
+
+	if *action == "pub" {
+		client := MQTT.NewClient(opts)
+		if token := client.Connect(); token.Wait() && token.Error() != nil {
+			panic(token.Error())
+		}
+		fmt.Println("Sample Publisher Started")
+		for i := 0; i < *num; i++ {
+			fmt.Println("---- doing publish ----")
+			token := client.Publish(*topic, byte(*qos), false, *payload)
+			token.Wait()
+		}
+
+		client.Disconnect(250)
+		fmt.Println("Sample Publisher Disconnected")
+	} else {
+		receiveCount := 0
+		choke := make(chan [2]string)
+
+		opts.SetDefaultPublishHandler(func(client *MQTT.Client, msg MQTT.Message) {
+			choke <- [2]string{msg.Topic(), string(msg.Payload())}
+		})
+
+		client := MQTT.NewClient(opts)
+		if token := client.Connect(); token.Wait() && token.Error() != nil {
+			panic(token.Error())
+		}
+
+		if token := client.Subscribe(*topic, byte(*qos), nil); token.Wait() && token.Error() != nil {
+			
fmt.Println(token.Error()) + os.Exit(1) + } + + for receiveCount < *num { + incoming := <-choke + fmt.Printf("RECEIVED TOPIC: %s MESSAGE: %s\n", incoming[0], incoming[1]) + receiveCount++ + } + + client.Disconnect(250) + fmt.Println("Sample Subscriber Disconnected") + } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/CAfile.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/CAfile.pem new file mode 100644 index 000000000..16c664a43 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/CAfile.pem @@ -0,0 +1,150 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 1 (0x1) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy CA + Validity + Not Before: Oct 21 19:24:23 2013 GMT + Not After : Sep 25 19:24:23 2018 GMT + Subject: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy CA + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:c2:d1:d0:31:dc:93:c3:ad:88:0d:f8:93:fe:cc: + aa:04:1d:85:aa:c3:bb:bd:87:04:f0:42:67:14:34: + 4a:56:94:2b:bf:d0:6b:72:30:38:39:35:20:8c:e3: + 7e:65:82:b0:7e:3e:1d:f1:18:82:b7:d6:19:59:43: + ed:81:be:eb:51:44:fc:77:9e:37:ad:e1:a0:18:b9: + 4b:59:79:90:81:a4:e4:52:2f:fc:e2:ff:98:10:5e: + d5:13:9a:16:62:1a:e0:cb:ab:1d:ae:da:d1:40:d4: + 97:b1:e6:e3:f1:97:2c:2a:52:73:ab:d0:a2:15:f3: + 1e:9a:b0:67:d0:62:67:4b:74:b0:bb:8f:ef:9e:32: + 6a:4c:27:4e:82:7c:16:66:ce:06:e9:a3:d9:36:4f: + f4:3e:bc:80:00:93:c1:ca:31:cf:03:68:d4:e5:8b: + 38:45:b6:1b:35:b0:c0:e9:4a:62:75:83:01:aa:b9: + c1:0b:c0:ee:97:c0:73:23:cd:34:ec:bb:3c:95:35: + c8:2d:69:ff:86:d8:1f:c8:04:7e:18:de:62:c2:4b: + 37:c6:aa:8e:03:bf:2b:0d:97:20:2a:75:47:ec:98: + 29:3c:64:52:ef:91:8b:63:0f:6a:f8:c2:9d:08:6a: + 61:68:6f:64:9a:56:b2:0a:bc:7b:59:3d:7f:fd:ba: + 12:4b + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 5B:BB:3E:8E:2D:90:AD:AE:58:07:FF:53:00:18:98:FF:44:84:4C:BA + X509v3 Authority Key Identifier: + keyid:5B:BB:3E:8E:2D:90:AD:AE:58:07:FF:53:00:18:98:FF:44:84:4C:BA + + X509v3 Basic Constraints: + CA:TRUE + Signature Algorithm: sha1WithRSAEncryption + 3c:89:0b:bd:49:10:a6:1a:f6:2a:4b:5f:02:3d:ee:f3:19:4f: + c9:10:79:9c:01:ef:88:22:3d:03:5b:1a:14:46:b6:7f:9b:af: + a5:99:1a:d4:d4:9b:d6:6f:c1:fe:96:8f:9a:9e:47:42:b4:ee: + 21:56:6a:c4:92:38:6c:81:cd:8e:31:43:86:7c:97:15:90:80: + d8:21:f0:46:be:2a:2f:f2:96:07:85:74:a8:fa:1b:78:8f:80: + c1:5e:bc:d9:06:c2:33:9e:8e:f9:08:dd:43:7b:6f:5a:22:67: + 46:78:5d:fb:4a:4e:c2:c6:29:94:17:53:a6:c5:a9:d6:67:06: + 4f:07:ef:da:5b:45:21:83:cb:31:b2:dc:dc:ac:13:19:98:3f: + 98:5f:2c:b4:b4:da:d4:43:d7:a9:1a:6e:b6:cf:be:85:a8:80: + 1f:8a:c1:95:8a:83:a4:af:d2:23:4a:b6:18:87:4e:28:31:36: + 03:2c:bf:e4:9e:b6:75:fd:c4:68:ed:4d:d5:a8:fa:a5:81:13: + 17:1c:43:67:02:1c:d0:e6:00:6e:8b:13:e6:60:1f:ba:40:78: + 93:25:ca:59:5a:71:cc:58:d4:52:63:1d:b3:3c:ce:37:f1:89: + 78:fc:13:fa:b3:ea:22:af:17:68:8a:a1:59:57:f5:1a:49:6e: + b9:f6:5f:b3 +-----BEGIN CERTIFICATE----- +MIIDizCCAnOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO +MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO +MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy +M1oXDTE4MDkyNTE5MjQyM1owYDELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15 +MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15 +MREwDwYDVQQDDAhEdW1teSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC 
+ggEBAMLR0DHck8OtiA34k/7MqgQdharDu72HBPBCZxQ0SlaUK7/Qa3IwODk1IIzj +fmWCsH4+HfEYgrfWGVlD7YG+61FE/HeeN63hoBi5S1l5kIGk5FIv/OL/mBBe1ROa +FmIa4MurHa7a0UDUl7Hm4/GXLCpSc6vQohXzHpqwZ9BiZ0t0sLuP754yakwnToJ8 +FmbOBumj2TZP9D68gACTwcoxzwNo1OWLOEW2GzWwwOlKYnWDAaq5wQvA7pfAcyPN +NOy7PJU1yC1p/4bYH8gEfhjeYsJLN8aqjgO/Kw2XICp1R+yYKTxkUu+Ri2MPavjC +nQhqYWhvZJpWsgq8e1k9f/26EksCAwEAAaNQME4wHQYDVR0OBBYEFFu7Po4tkK2u +WAf/UwAYmP9EhEy6MB8GA1UdIwQYMBaAFFu7Po4tkK2uWAf/UwAYmP9EhEy6MAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADyJC71JEKYa9ipLXwI97vMZ +T8kQeZwB74giPQNbGhRGtn+br6WZGtTUm9Zvwf6Wj5qeR0K07iFWasSSOGyBzY4x +Q4Z8lxWQgNgh8Ea+Ki/ylgeFdKj6G3iPgMFevNkGwjOejvkI3UN7b1oiZ0Z4XftK +TsLGKZQXU6bFqdZnBk8H79pbRSGDyzGy3NysExmYP5hfLLS02tRD16kabrbPvoWo +gB+KwZWKg6Sv0iNKthiHTigxNgMsv+SetnX9xGjtTdWo+qWBExccQ2cCHNDmAG6L +E+ZgH7pAeJMlyllaccxY1FJjHbM8zjfxiXj8E/qz6iKvF2iKoVlX9RpJbrn2X7M= +-----END CERTIFICATE----- +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 1 (0x1) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy CA + Validity + Not Before: Oct 21 19:24:23 2013 GMT + Not After : Sep 25 19:24:23 2018 GMT + Subject: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy Intermediate CA + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:cf:7d:92:07:a5:56:1b:6f:4c:f3:34:c2:12:c2: + 34:62:3b:69:aa:a6:0c:c6:70:5b:93:bc:dc:41:98: + 61:87:61:36:be:8c:08:dd:31:a9:33:76:d3:66:3e: + 77:60:1e:ed:9e:e1:e5:ef:bf:17:91:ac:0c:63:07: + 01:ab:30:67:bc:16:a6:2f:79:f0:61:8c:79:2d:3c: + 98:60:74:61:c4:5f:60:44:85:71:92:9d:cc:7b:14: + 39:74:aa:44:f9:9f:ae:f6:c7:8d:c3:01:47:53:24: + ac:7b:a2:f6:c5:7d:65:37:40:0b:20:c8:d4:14:cd: + f8:f4:57:ea:23:70:f4:e3:99:2b:1c:9a:67:37:ed: + 93:c7:a7:7c:86:90:f7:ae:fc:6f:4b:18:dc:d5:eb: + f3:68:33:d6:78:14:d1:ca:a7:06:7d:75:34:f6:c0: + d4:15:1b:21:2b:78:d9:76:24:a5:f0:c6:13:c8:1e: + 4a:c8:ca:77:34:4e:f8:fa:49:5f:6c:e1:66:a8:65: + f0:8c:bc:44:20:03:ac:af:4a:61:a5:39:48:51:1b: + cb:d8:22:29:60:27:47:42:fc:bf:6a:77:65:58:09: + 20:82:1c:d1:16:5e:5a:18:ea:99:61:8e:93:94:27: + 30:20:dd:44:03:50:43:b4:ec:a3:0f:ee:91:69:d7: + b1:5b + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:TRUE + Signature Algorithm: sha1WithRSAEncryption + 39:a0:8d:2f:68:22:1d:4f:3e:db:f1:9b:29:20:77:23:f8:21: + 34:17:84:00:88:a8:3e:a1:4d:84:94:90:96:02:e6:6a:b4:20: + 51:a0:66:20:38:05:18:aa:2a:3e:9a:50:60:af:eb:4a:70:ac: + 9b:59:30:d5:17:14:9c:b4:91:6a:1b:c3:45:8a:dd:cd:2f:c6: + c5:8c:fe:d0:76:20:63:a4:97:db:e3:2a:8e:c1:3d:c8:b6:06: + 2d:49:7a:d9:8a:de:16:ea:5d:5f:fb:41:79:0d:8f:d2:23:00: + d9:b9:6f:93:45:bb:74:17:ea:6b:72:13:01:86:fe:8d:7e:8f: + 27:71:76:a9:37:6d:6c:90:5a:3f:d9:6d:4d:6c:a4:64:7a:ea: + 82:c9:87:ee:6a:d0:6e:30:05:7f:19:1d:19:31:a9:9a:ce:21: + 84:da:47:c7:a0:66:12:e8:7e:57:69:5d:9c:24:e5:46:3c:bf: + 37:f6:88:c3:b1:42:de:3b:81:ed:f5:ae:e2:23:9e:c2:89:a1: + e7:5c:1d:49:0f:ed:ae:55:60:0e:4e:4c:e9:8a:64:e6:ae:c5: + d1:99:a7:70:4c:7e:5d:53:ac:88:2c:0f:0b:21:94:1a:32:f9: + a1:cc:1e:67:98:6b:b6:e9:b1:b9:4b:46:02:b1:65:c9:49:83: + 80:bd:b9:70 +-----BEGIN CERTIFICATE----- +MIIDWDCCAkCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO +MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO +MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy +M1oXDTE4MDkyNTE5MjQyM1owbTELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15 +MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15 +MR4wHAYDVQQDDBVEdW1teSBJbnRlcm1lZGlhdGUgQ0EwggEiMA0GCSqGSIb3DQEB 
+AQUAA4IBDwAwggEKAoIBAQDPfZIHpVYbb0zzNMISwjRiO2mqpgzGcFuTvNxBmGGH +YTa+jAjdMakzdtNmPndgHu2e4eXvvxeRrAxjBwGrMGe8FqYvefBhjHktPJhgdGHE +X2BEhXGSncx7FDl0qkT5n672x43DAUdTJKx7ovbFfWU3QAsgyNQUzfj0V+ojcPTj +mSscmmc37ZPHp3yGkPeu/G9LGNzV6/NoM9Z4FNHKpwZ9dTT2wNQVGyEreNl2JKXw +xhPIHkrIync0Tvj6SV9s4WaoZfCMvEQgA6yvSmGlOUhRG8vYIilgJ0dC/L9qd2VY +CSCCHNEWXloY6plhjpOUJzAg3UQDUEO07KMP7pFp17FbAgMBAAGjEDAOMAwGA1Ud +EwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADmgjS9oIh1PPtvxmykgdyP4ITQX +hACIqD6hTYSUkJYC5mq0IFGgZiA4BRiqKj6aUGCv60pwrJtZMNUXFJy0kWobw0WK +3c0vxsWM/tB2IGOkl9vjKo7BPci2Bi1JetmK3hbqXV/7QXkNj9IjANm5b5NFu3QX +6mtyEwGG/o1+jydxdqk3bWyQWj/ZbU1spGR66oLJh+5q0G4wBX8ZHRkxqZrOIYTa +R8egZhLofldpXZwk5UY8vzf2iMOxQt47ge31ruIjnsKJoedcHUkP7a5VYA5OTOmK +ZOauxdGZp3BMfl1TrIgsDwshlBoy+aHMHmeYa7bpsblLRgKxZclJg4C9uXA= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/README b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/README new file mode 100644 index 000000000..aa7c97d70 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/README @@ -0,0 +1,9 @@ +Certificate structure: + +Root CA + | + |-> Intermediate CA + | + |-> Server + | + |-> Client diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/client-crt.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/client-crt.pem new file mode 100644 index 000000000..5069e08e8 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/client-crt.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDRzCCAi8CAQIwDQYJKoZIhvcNAQEFBQAwbTELMAkGA1UEBhMCVVMxDjAMBgNV +BAgMBUR1bW15MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNV +BAsMBUR1bW15MR4wHAYDVQQDDBVEdW1teSBJbnRlcm1lZGlhdGUgQ0EwHhcNMTMx +MDIxMTkyNDIzWhcNMTgwOTI1MTkyNDIzWjBmMQswCQYDVQQGEwJVUzEOMAwGA1UE +CAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEOMAwGA1UE +CwwFRHVtbXkxFzAVBgNVBAMMDkR1bW15IChjbGllbnQpMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA4J/+eKqsjK0QS+cSDa5Fh4XM4THy812JkWMySA5r +bFxHZ5ye36/IuwyRQ0Yn2DvhhsDR5K7yz8K+Yfp9A6WRBkyHK/sy/8vurQHeDH3y +lLtHCLi5nfyt2fDxWOYwFrS1giGn2IxJIHBAWu4cBODCkqwqAp92+Lqp3Sn+D+Fb +maHEU3LHua8OIJiSeAIHo/jPqfHFqZxK1bXhGCSQKvUZCaTftsqDtn+LZSElqj1y +5/cnc7XGsTf8ml/+FDMX1aSAHf+pu+UAp9JqOXOM60A5JIpYu3Lsejp1RppyPJYP +zC4nSN8R2LOdDChP2MB7f1/sXRGlLM/X3Vi4X+c6xQ85TQIDAQABMA0GCSqGSIb3 +DQEBBQUAA4IBAQAMWt9qMUOY5z1uyYcjUnconPHLM9MADCZI2sRbfdBOBHEnTVKv +Y63SWnCt8TRJb01LKLIEys6pW1NUlxr6b+FwicNmycR0L8b63cmNXg2NmSZsnK9C +fGT6BbbDdVPYjvmghpSd3soBGBLPsJvaFc6UL5tunm+hT7PxWjDxHZEiE18PTs05 +Vpp/ytILzhoXvJeFOWQHIdf4DLR5izGMNTKdQzgg1eBq2vKgjJIlEZ3j/AyHkJLE +qFip1tyc0PRzgKYFLWttaZzakCLJOGuxtvYB+GrixVM7U23p5LQbLE0KX7fe2Gql +xKMfSID5NUDNf1SuSrrGLD3gfnJEKVB8TVBk +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/client-key.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/client-key.pem new file mode 100644 index 000000000..7665fb655 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/client-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- 
+MIIEpAIBAAKCAQEA4J/+eKqsjK0QS+cSDa5Fh4XM4THy812JkWMySA5rbFxHZ5ye +36/IuwyRQ0Yn2DvhhsDR5K7yz8K+Yfp9A6WRBkyHK/sy/8vurQHeDH3ylLtHCLi5 +nfyt2fDxWOYwFrS1giGn2IxJIHBAWu4cBODCkqwqAp92+Lqp3Sn+D+FbmaHEU3LH +ua8OIJiSeAIHo/jPqfHFqZxK1bXhGCSQKvUZCaTftsqDtn+LZSElqj1y5/cnc7XG +sTf8ml/+FDMX1aSAHf+pu+UAp9JqOXOM60A5JIpYu3Lsejp1RppyPJYPzC4nSN8R +2LOdDChP2MB7f1/sXRGlLM/X3Vi4X+c6xQ85TQIDAQABAoIBABosCiZdHIW3lHKD +leLqL0e/G0QR4dDhUSoTeMRUiceyaM91vD0r6iOBL1u7TOEw+PIOfWY7zCbQ9gXM +fcxy+hbVy9ogBq0vQbv+v7SM6DrUJ06o11fFHSyLmlNVXr0GiS+EZF4i2lJhQd5W +aAVZetJEJRDxK5eHiEswnV2UUGvx6VCpFILL0JVGxWY7oOPxiiBLl+cmfRZdTfGx +46VzQvBu7N8hGpCIsljuVFP/DxR7c+2oyrtFaFSMZBMNI8fICgkb2QeLk/XUBXtn +0bDttgmOP/BvnNAor7nIRoeer/7kbXc9jOsgXwnvDKPapltQddL+exycXzbIjLuY +Z2SFsDECgYEA+2A4QGV0biqdICAoKCHCHCU/CrdDUQiQDHqRU6/nhka7MFPSl4Wy +9oISRrYZhKIbSbaXwTW5ZcYq8Hpn/yGYIWlINP9sjprnOWPE7L74lac+PFWXNMUI +jNJOJkLK1IeppByXAt5ekGBrG556bhzRCJsTjYsyUR/r/bMEF1FD8WMCgYEA5MHM +hqmkDK5CbklVaPonNc251Lx+HSzzQ40WExC/PrCczRaZMKlhmyKZfWJCInQsUDln +w6Lqa5UnwZV2HYAF30VZYQsq84ulNnx1/36BEZyIimfAL1WHvKeGWjGsZqniXxxb +Os5wEMAvxk0SWVrR5v6YpBDv3t9+lLg/bzBOAY8CgYEAuZ0q7CH9/vroWrhj7n4+ +3pmCG1+HDWbNNumqNalFxBimT+EVN1058FvLMvtzjERG8f8pvzj0VPom6rr336Pm +uYUMFFYmyoYHBpFs74Nz+s0rX1Gz/PsgfRstKYNYUeZ6lPunZi7clK8dZ591t6j/ +kOMxZOrLlKuFjieJdc5D5RECgYAVTzxXOwxOJhmIHoq3Sb5HU8/A0oJJA3vxyf3J +buDx3Q/uRvGkR9MQ2YtE09dnUD0kiARzhASkWvOmI98p5lglsVcfJCQvJc4RIkz3 +rPgnBNbvVbTgc+4+E7j/Q+tUcPTmeUTCWKK13MFWjq1r53rwMr1TY0SFFXq8LeGy +4OQTXwKBgQDCuPN3Q+EJusYy7TXt0WicY/xyu15s1216N7PmRKFr/WAn2JdAfjbD +JKDwVqo0AQiEDAobJk0JMPs+ENK2d58GsybCK4QGAh6z5FGunb5T432YfnoXtL3J +ZKVvkf7eowvokTIeiDf3XrCPajLDBpo88Xax+RH03US7XRdu/fVzMA== +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/intermediateCA-crt.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/intermediateCA-crt.pem new file mode 100644 index 000000000..6b2658ae2 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/intermediateCA-crt.pem @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDWDCCAkCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO +MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO +MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy +M1oXDTE4MDkyNTE5MjQyM1owbTELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15 +MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15 +MR4wHAYDVQQDDBVEdW1teSBJbnRlcm1lZGlhdGUgQ0EwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDPfZIHpVYbb0zzNMISwjRiO2mqpgzGcFuTvNxBmGGH +YTa+jAjdMakzdtNmPndgHu2e4eXvvxeRrAxjBwGrMGe8FqYvefBhjHktPJhgdGHE +X2BEhXGSncx7FDl0qkT5n672x43DAUdTJKx7ovbFfWU3QAsgyNQUzfj0V+ojcPTj +mSscmmc37ZPHp3yGkPeu/G9LGNzV6/NoM9Z4FNHKpwZ9dTT2wNQVGyEreNl2JKXw +xhPIHkrIync0Tvj6SV9s4WaoZfCMvEQgA6yvSmGlOUhRG8vYIilgJ0dC/L9qd2VY +CSCCHNEWXloY6plhjpOUJzAg3UQDUEO07KMP7pFp17FbAgMBAAGjEDAOMAwGA1Ud +EwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADmgjS9oIh1PPtvxmykgdyP4ITQX +hACIqD6hTYSUkJYC5mq0IFGgZiA4BRiqKj6aUGCv60pwrJtZMNUXFJy0kWobw0WK +3c0vxsWM/tB2IGOkl9vjKo7BPci2Bi1JetmK3hbqXV/7QXkNj9IjANm5b5NFu3QX +6mtyEwGG/o1+jydxdqk3bWyQWj/ZbU1spGR66oLJh+5q0G4wBX8ZHRkxqZrOIYTa +R8egZhLofldpXZwk5UY8vzf2iMOxQt47ge31ruIjnsKJoedcHUkP7a5VYA5OTOmK +ZOauxdGZp3BMfl1TrIgsDwshlBoy+aHMHmeYa7bpsblLRgKxZclJg4C9uXA= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/intermediateCA-key.pem 
b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/intermediateCA-key.pem new file mode 100644 index 000000000..747736097 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/intermediateCA-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEAz32SB6VWG29M8zTCEsI0YjtpqqYMxnBbk7zcQZhhh2E2vowI +3TGpM3bTZj53YB7tnuHl778XkawMYwcBqzBnvBamL3nwYYx5LTyYYHRhxF9gRIVx +kp3MexQ5dKpE+Z+u9seNwwFHUySse6L2xX1lN0ALIMjUFM349FfqI3D045krHJpn +N+2Tx6d8hpD3rvxvSxjc1evzaDPWeBTRyqcGfXU09sDUFRshK3jZdiSl8MYTyB5K +yMp3NE74+klfbOFmqGXwjLxEIAOsr0phpTlIURvL2CIpYCdHQvy/andlWAkgghzR +Fl5aGOqZYY6TlCcwIN1EA1BDtOyjD+6RadexWwIDAQABAoIBAEs6OsS85DBENUEE +QszsTnPDGLd/Rqh3uiwhUDYUGmAsFd4WBWy1AaSgE1tBkKRv8jUlr+kxfkkZeNA6 +jRdVEHc4Ov6Blm63sIN/Mbve1keNUOjm/NtsjOOe3In45dMfWx8sELC/+O0jIcod +tpy5rwXOGXrEdWgpmXZ1nXVGEfOmQH3eGEPkqbY1I4YlAoXD0mc5fNQQrn7qrogH +M5USCnC44yIIF0Yube2Fg0Cem41vzIvENAlZC273gyW+pQwez0uma2LaCWmkEz1N +sESrNSQ4yeQnDQYlgX2w3RRpqql4GDzAdISL2WJcNhW6KJ72B0SQ1ny/TmQgZePG +Ojv1T0ECgYEA9CXqKyXBSPF+Wdc/fNagrIi6tcNkLAN2/p5J3Z6TtbZGjItoMlDX +c+hwHobcI3GZLMlxlBx7ePc7cKgaMDXrl8BZZjFoyEV9OHOLicfNkLFmBIZ14gtX +bGZYDuCcal46r7IKRjT8lcYWCoLJnI9vLEII7Q7P/eBgcntw3+h/ziECgYEA2ZAa +bp9d0xBaOXq/E341guxNG49R09/DeZ/2CEM+V1pMD8OVH9cvxrBdDLUmAnrqeGTh +Djoi1UEbOVAV6/dXbTQHrla+HF4Uq+t9tV+mt68TEa54PQ/ERt5ih3nZGBiqZ6rX +SGeyZmIXMLIZEs2dIbJ2DmLcZj6Tjxkd/PxPt/sCgYBGczZaEv/uK3k5NWplfI1K +m/28e1BJfwp0OHq6D4sx8RH0djmv4zH4iUbpGCMnuxznFo3Gnl1mr3igbnF4HecI +mAF0AqfoulyC0JygOl5v9TCp957Ghl1Is1OPn3KjIuOuVSKv1ZRZJ5qul8TTf3Qm +AjwPI6oS6Q8LmeEdSzqt4QKBgB5MglHboe5t/ZK5tHibgApOrGJlMEkohYmfrFz0 +OG9j5OnhHBiGGGI8V4kYhUWdJqBDtFAN6qH2Yjs2Gwd0t9k+gL9X1zwOIiTbM/OZ +cZdtK2Ov/5DJbFVOTTx+zKwda0Xqtfagcmjtyjr+4p0Kw5JYzzYrsHQQzO4F2nZM +ETIXAoGADskTzhgpPrC5/qfuLY4gBUtCfYIb8kaKN90AT8A/14lBrT4lSnmsEvKP +tRDmFjnc/ogDlHa5SRDijtT6UoyQPuauAt6DYrJ8G6qKJqiMwJcuLV1XFks7z1J8 +VzB8kso1pPAtcvVXBPklsjvZ10NdQOCqm4N3EVp69agbB1oco4I= +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/mosquitto.org.crt b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/mosquitto.org.crt new file mode 100644 index 000000000..b8535e887 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/mosquitto.org.crt @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC8DCCAlmgAwIBAgIJAOD63PlXjJi8MA0GCSqGSIb3DQEBBQUAMIGQMQswCQYD +VQQGEwJHQjEXMBUGA1UECAwOVW5pdGVkIEtpbmdkb20xDjAMBgNVBAcMBURlcmJ5 +MRIwEAYDVQQKDAlNb3NxdWl0dG8xCzAJBgNVBAsMAkNBMRYwFAYDVQQDDA1tb3Nx +dWl0dG8ub3JnMR8wHQYJKoZIhvcNAQkBFhByb2dlckBhdGNob28ub3JnMB4XDTEy +MDYyOTIyMTE1OVoXDTIyMDYyNzIyMTE1OVowgZAxCzAJBgNVBAYTAkdCMRcwFQYD +VQQIDA5Vbml0ZWQgS2luZ2RvbTEOMAwGA1UEBwwFRGVyYnkxEjAQBgNVBAoMCU1v +c3F1aXR0bzELMAkGA1UECwwCQ0ExFjAUBgNVBAMMDW1vc3F1aXR0by5vcmcxHzAd +BgkqhkiG9w0BCQEWEHJvZ2VyQGF0Y2hvby5vcmcwgZ8wDQYJKoZIhvcNAQEBBQAD +gY0AMIGJAoGBAMYkLmX7SqOT/jJCZoQ1NWdCrr/pq47m3xxyXcI+FLEmwbE3R9vM +rE6sRbP2S89pfrCt7iuITXPKycpUcIU0mtcT1OqxGBV2lb6RaOT2gC5pxyGaFJ+h +A+GIbdYKO3JprPxSBoRponZJvDGEZuM3N7p3S/lRoi7G5wG5mvUmaE5RAgMBAAGj +UDBOMB0GA1UdDgQWBBTad2QneVztIPQzRRGj6ZHKqJTv5jAfBgNVHSMEGDAWgBTa +d2QneVztIPQzRRGj6ZHKqJTv5jAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUA +A4GBAAqw1rK4NlRUCUBLhEFUQasjP7xfFqlVbE2cRy0Rs4o3KS0JwzQVBwG85xge +REyPOFdGdhBY2P1FNRy0MDr6xr+D2ZOwxs63dG1nnAnWZg7qwoLgpZ4fESPD3PkA 
+1ZgKJc2zbSQ9fCPxt2W3mdVav66c6fsb7els2W2Iz7gERJSX +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/rootCA-crt.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/rootCA-crt.pem new file mode 100644 index 000000000..1ddb0d494 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/rootCA-crt.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDizCCAnOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO +MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO +MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy +M1oXDTE4MDkyNTE5MjQyM1owYDELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15 +MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15 +MREwDwYDVQQDDAhEdW1teSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAMLR0DHck8OtiA34k/7MqgQdharDu72HBPBCZxQ0SlaUK7/Qa3IwODk1IIzj +fmWCsH4+HfEYgrfWGVlD7YG+61FE/HeeN63hoBi5S1l5kIGk5FIv/OL/mBBe1ROa +FmIa4MurHa7a0UDUl7Hm4/GXLCpSc6vQohXzHpqwZ9BiZ0t0sLuP754yakwnToJ8 +FmbOBumj2TZP9D68gACTwcoxzwNo1OWLOEW2GzWwwOlKYnWDAaq5wQvA7pfAcyPN +NOy7PJU1yC1p/4bYH8gEfhjeYsJLN8aqjgO/Kw2XICp1R+yYKTxkUu+Ri2MPavjC +nQhqYWhvZJpWsgq8e1k9f/26EksCAwEAAaNQME4wHQYDVR0OBBYEFFu7Po4tkK2u +WAf/UwAYmP9EhEy6MB8GA1UdIwQYMBaAFFu7Po4tkK2uWAf/UwAYmP9EhEy6MAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADyJC71JEKYa9ipLXwI97vMZ +T8kQeZwB74giPQNbGhRGtn+br6WZGtTUm9Zvwf6Wj5qeR0K07iFWasSSOGyBzY4x +Q4Z8lxWQgNgh8Ea+Ki/ylgeFdKj6G3iPgMFevNkGwjOejvkI3UN7b1oiZ0Z4XftK +TsLGKZQXU6bFqdZnBk8H79pbRSGDyzGy3NysExmYP5hfLLS02tRD16kabrbPvoWo +gB+KwZWKg6Sv0iNKthiHTigxNgMsv+SetnX9xGjtTdWo+qWBExccQ2cCHNDmAG6L +E+ZgH7pAeJMlyllaccxY1FJjHbM8zjfxiXj8E/qz6iKvF2iKoVlX9RpJbrn2X7M= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/rootCA-key.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/rootCA-key.pem new file mode 100644 index 000000000..278287687 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/rootCA-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAwtHQMdyTw62IDfiT/syqBB2FqsO7vYcE8EJnFDRKVpQrv9Br +cjA4OTUgjON+ZYKwfj4d8RiCt9YZWUPtgb7rUUT8d543reGgGLlLWXmQgaTkUi/8 +4v+YEF7VE5oWYhrgy6sdrtrRQNSXsebj8ZcsKlJzq9CiFfMemrBn0GJnS3Swu4/v +njJqTCdOgnwWZs4G6aPZNk/0PryAAJPByjHPA2jU5Ys4RbYbNbDA6UpidYMBqrnB +C8Dul8BzI8007Ls8lTXILWn/htgfyAR+GN5iwks3xqqOA78rDZcgKnVH7JgpPGRS +75GLYw9q+MKdCGphaG9kmlayCrx7WT1//boSSwIDAQABAoIBAGphOzge5Cjzdtl6 +JQX7J9M7c6O9YaSqN44iFDs6GmWQXxtMaX9eyTSjx/RmvLwdUtZ8gMkHw0kzBYBy +0RwJ7mDgNKP0px6xl0Qo2fYvpTLFoU8nmQUy4AwAXIVpnFNRrfJIq9qw7ZZi/7pL +A6kGDT3G7Bajw/4MVWfOb8GgGhte1ZhZgXFEZNjGkhwi3Na1/6slOQIfnkkhco0X +ru1Cw82nXNPHqu6K+pbHP9ucYdUNZWRh+yQS3p92lr5tB3/IL/lD0Cl3+xP8JFl+ +5NMSISOKGb3ld0rzrJd1ncgLgv/XlHu8DqvcFs9QwXbaUlG0U/0GrorGYqFaZYaH +R1rkZjECgYEA9mAarVAeL7IOeEIg28f/qyp//5+pMzRpVhnI+xscHB5QUO9WH+uE +nOXwcGvcRME134H4o/0j75aMhVs7sGfMOQ+enAwOxRC5h4MCClDSWysWftU8Ihhf +Sm6eZ0kYLZNqXt/TxTs124NiF1Bb5pekzEr9fTj//vP4meuAQ/D0JoUCgYEAym4f +BCm5tLwYYxZM4tko0g9BHxy4aAPfyshuLed1JjkK4JCFp368GBoknj5rUNewTun2 +1zkQF9b5Mi3k5qWkboP5rpp7DuG3PJdWypV6b/btUeqcyG1gteQwTAwebfqeM0vH +QvpuAoRMtEcSBQBl2s9zgmObXUpDlLwuIlL+to8CgYEAyJBtxx8Mo9k4jE+Q/jnu ++QFtF8R68jM9eRkeksR7+qv2yBw+KVgKKcvKE0rLErGS0LO2nJELexQ8qqcdjTrC 
+dsUvYmsybtxxnE5bD9jBlfQaqP+fp0Xd9PLeQsivRRLXqgpeFBZifqOS69XAKpTS +VHjLqPAI/hzQCUU8spJpvx0CgYAePgt2NMGgxcUi8I72CRl3IH5LJqBKMeH6Sq1j +QEQZPMZqPE0rc9yoASfdWFfyEPcvIvcUulq0JRK/s2mSJ8cEF8Vyl3OxCnm0nKuD +woczOQHFjjZ0HxsmsXuhsOHO7nU6FqUjVYSf7aIEAOYpRyDwarPIFBd+/XxROTfv +OtUA8wKBgAOiGXRxycb4rAtJBDqPAgdAAwNgvQHyVgn32ArWtgu8ermuZW5h1y45 +hULFvCbLSCpo+I7QhRhw4y2DoB1DgIw04BeFUIcE+az7HH3euAyCLQ0caaA8Xk/6 +bpPfUMe1SNi51f345QlOPvvwGllTC6DeBhZ730k7VNB32dOCV3kE +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/server-crt.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/server-crt.pem new file mode 100644 index 000000000..f3de3caa2 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/server-crt.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDYTCCAkmgAwIBAgIBATANBgkqhkiG9w0BAQUFADBtMQswCQYDVQQGEwJVUzEO +MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO +MAwGA1UECwwFRHVtbXkxHjAcBgNVBAMMFUR1bW15IEludGVybWVkaWF0ZSBDQTAe +Fw0xMzEwMjExOTI0MjNaFw0xODA5MjUxOTI0MjNaMGYxCzAJBgNVBAYTAlVTMQ4w +DAYDVQQIDAVEdW1teTEOMAwGA1UEBwwFRHVtbXkxDjAMBgNVBAoMBUR1bW15MQ4w +DAYDVQQLDAVEdW1teTEXMBUGA1UEAwwORHVtbXkgKHNlcnZlcikwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0fQCRUWXt+i7JMR55Zuo6wBRxG7RnPutN +2L7J/18io52vxjm8AZDiC0JFkCHh72ZzvbgVA+e+WxAIYfioRis4JWw4jK8v5m8q +cZzS0GJNTMROPiZQi7A81tAbrV00XN7d5PsmIJ2Bf4XbJWMy31CsmoFloeRMd7bR +LxwDIb0qqRawhKsWdfZB/c9wGKmHlei50B7PXk+koKnVdsLwXxtCZDvc/3fNRHEK +lZs4m0N05G38FdrnczPm/0pie87nK9rnklL7u1sYOukOznnOtW5h7+A4M+DxzME0 +HRU6k4d+6QvukxBlsE93gHhwRsejIuDGlqD+DRxk2PdmmgsmPH59AgMBAAGjEzAR +MA8GA1UdEQQIMAaHBAoKBOQwDQYJKoZIhvcNAQEFBQADggEBAJ3bKs2b4cAJWTZj +69dMEfYZKcQIXs7euwtKlP7H8m5c+X5KmZPi1Puq4Z0gtvLu/z7J9UjZjG0CoylV +q15Zp5svryJ7XzcsZs7rwyo1JtngW1z54wr9MezqIOF2w12dTwEAINFsW7TxAsH7 +bfqkzZjuCbbsww5q4eHuZp0yaMHc3hOGaUot27OTlxlIMhv7VBBqWAj0jmvAfTKf +la0SiL/Mc8rD8D5C0SXGcCL6li/kqtinAxzhokuyyPf+hQX35kcZxEPu6WxtYVLv +hMzrokOZP2FrGbCnhaNT8gw4Aa0RXV1JgonRWYSbkeaCzvr2bJ0OuJiDdwdRKvOo +raKLlfY= +-----END CERTIFICATE----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/server-key.pem b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/server-key.pem new file mode 100644 index 000000000..951ad0efa --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/samplecerts/server-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAtH0AkVFl7fouyTEeeWbqOsAUcRu0Zz7rTdi+yf9fIqOdr8Y5 +vAGQ4gtCRZAh4e9mc724FQPnvlsQCGH4qEYrOCVsOIyvL+ZvKnGc0tBiTUzETj4m +UIuwPNbQG61dNFze3eT7JiCdgX+F2yVjMt9QrJqBZaHkTHe20S8cAyG9KqkWsISr +FnX2Qf3PcBiph5XoudAez15PpKCp1XbC8F8bQmQ73P93zURxCpWbOJtDdORt/BXa +53Mz5v9KYnvO5yva55JS+7tbGDrpDs55zrVuYe/gODPg8czBNB0VOpOHfukL7pMQ +ZbBPd4B4cEbHoyLgxpag/g0cZNj3ZpoLJjx+fQIDAQABAoIBAG0UfxtUTn4dDdma +TgihIj6Ph8s0Kzua0yshK215YU3WBJ8O9iWh7KYwl8Ti7xdVUF3y8yYATjbFYlMu +otFQVx5/v4ANxnL0mYrVTyo5tq9xDdMbzJwxUDn0uaGAjSvwVOFWWlMYsxhoscVY +OzOrs14dosaBqTBtyZdzGULrSSBWPCBlucRcvTV/eZwgYrYJ3bG66ZTfdc930KPj +nfkWrsAWmPz8irHoWQ2OX+ZJTprVYRYIZXqpFn3zuwmhpJkZUVULMMk6LFBKDmBT +F2+b4h49P+oNJ+6CRoOERHYq2k1MmYBcu1z8lMjdfRGUDdK4vS9pcqhBXJJg1vU9 +APRtfiECgYEA6Y3LqQJLkUI0w6g/9T+XyzUoi0aUfH6PT81XnGYqJxTBHinZvgML +mF3qtZ0bHGwEoAsyhSgDkeCawE/E7Phd+B6aku2QMVm8GHygZg0Pbao4cxXv+CF3 
+i1Lo7n3zY0kTVrjsvDRsDDESmRK4Ea48fJwOfUEtfG6VDtwmZAe8chcCgYEAxdWd +sWcc45ARi2vY6yb5Ysgt/g0z26KyQydF+GMWIz1FDfUxXJ/axdCovd3VIHDvItJE +n9LjFiobkyOKX99ou1foWwsmhn11duVrF7hsVrE0nsbd4RX3sTbqXa9x3GN/ujFr +0xHUTmiXt3Qyn/076jBiLGnbtzSxJ/IZIEI9VIsCgYEAketHnTaT5BOLR9ss6ptq +yUlTJYFZcFbaTy+qV0r1dyleZuwa4L6iVfYHmKSptZ4/XYbhb5RKdq/vv8uW679Z +ZpYoWTgX6N15yYrD5D6wrwG09yJzpYGzYNbSNX93u0aC0KIFNqlCAHQAfKbXXiSQ +IgKWgudf9ehZNMmTKtgygs0CgYAoTV9Fr7Lj7QqV84+KQDNX2137PmdNHDTil1Ka +ylzNKwMxV70JmIsx91MY8uMjK76bwmg2gvi+IC/j5r6ez11/pOXx/jCH/3D5mr0Z +ZPm1I36LxgmXfCkskfpmwYIZmq9/l+fWZPByVL5roiFaFHWrPNYTJDGdff+FGr3h +o3zpBwKBgDY1sih/nY+6rwOP+DcabGK9KFFKLXsoJrXobEniLxp7oFaGN2GkmKvN +NajCs5pr3wfb4LrVrsNvERnUsUXWg6ReLqfWbT4bmjzE2iJ3IbtVQ5M4kl6YrbdZ +PMgWoLCqnoo8NoGBtmVMWhaXNJvVZPgZHk33T5F0Cg6PKNdHDchH +-----END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sango.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sango.go new file mode 100644 index 000000000..d3f755025 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sango.go @@ -0,0 +1,51 @@ +package main + +import ( + "fmt" + "os" + "time" + + MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" +) + +var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) +} + +var onConnect MQTT.OnConnectHandler = func(client *MQTT.Client) { + fmt.Println("onConnect") + if token := client.Subscribe("shirou@github/#", 0, nil); token.Wait() && token.Error() != nil { + fmt.Println(token.Error()) + os.Exit(1) + } +} + +var subscribed = "#" + +func main() { + opts := MQTT.NewClientOptions().AddBroker("tcp://lite.mqtt.shiguredo.jp:1883") + opts.SetDefaultPublishHandler(f) + opts.SetOnConnectHandler(onConnect) + opts.SetCleanSession(true) + + opts.SetUsername("shirou@github") + opts.SetPassword("8Ub6F68kfYlr7RoV") + + c := MQTT.NewClient(opts) + if token := c.Connect(); token.Wait() && token.Error() != nil { + panic(token.Error()) + } + + qos := 0 + retain := false + payload := "sample" + topic := "shirou@github/log" + token := c.Publish(topic, byte(qos), retain, payload) + // token.Wait() + fmt.Printf("%v\n", token.Error()) + + for { + time.Sleep(1 * time.Second) + } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim.go new file mode 100644 index 000000000..0a8f4865c --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim.go @@ -0,0 +1,35 @@ +package main + +import ( + "fmt" + "os" + "time" + + MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" +) + +var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) +} + +var subscribed = "#" + +func main() { + opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883") + opts.SetDefaultPublishHandler(f) + opts.SetCleanSession(true) + + c := MQTT.NewClient(opts) + if token := c.Connect(); token.Wait() && token.Error() != nil { + panic(token.Error()) + } + + if token := c.Subscribe("#", 0, nil); token.Wait() && token.Error() != nil { + fmt.Println(token.Error()) + os.Exit(1) + } + for { + time.Sleep(1 * time.Second)
+ } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim2.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim2.go new file mode 100644 index 000000000..3d9b7d262 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim2.go @@ -0,0 +1,42 @@ +package main + +import ( + "fmt" + "os" + "time" + + MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" +) + +var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) +} + +var onConnect MQTT.OnConnectHandler = func(client *MQTT.Client) { + fmt.Println("onConnect") + if token := client.Subscribe("#", 0, nil); token.Wait() && token.Error() != nil { + fmt.Println(token.Error()) + os.Exit(1) + } +} + +var subscribed = "#" + +func main() { + // opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883") + opts := MQTT.NewClientOptions().AddBroker("tcp://lite.mqtt.shiguredo.jp:1883") + opts.SetDefaultPublishHandler(f) + opts.SetOnConnectHandler(onConnect) + opts.SetCleanSession(true) + + opts.SetUsername("shirou@github.com") + c := MQTT.NewClient(opts) + if token := c.Connect(); token.Wait() && token.Error() != nil { + panic(token.Error()) + } + + for { + time.Sleep(1 * time.Second) + } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim_pub.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim_pub.go new file mode 100644 index 000000000..3f89f8af2 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/sim_pub.go @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package main + +import ( + "flag" + "fmt" + "os" + + MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" +) + +/* +Options: + [-help] Display help + [-a pub|sub] Action pub (publish) or sub (subscribe) + [-m <message>] Payload to send + [-n <number>] Number of messages to send or receive + [-q 0|1|2] Quality of Service + [-clean] CleanSession (true if -clean is present) + [-id <clientid>] ClientID + [-user <user>] User + [-password <password>] Password + [-broker <uri>] Broker URI + [-topic <topic>] Topic + [-store <path>] Store Directory + +*/ + +func main() { + topic := flag.String("topic", "", "The topic name to/from which to publish/subscribe") + broker := flag.String("broker", "tcp://iot.eclipse.org:1883", "The broker URI.
ex: tcp://10.10.1.1:1883") + password := flag.String("password", "", "The password (optional)") + user := flag.String("user", "", "The User (optional)") + id := flag.String("id", "testgoid", "The ClientID (optional)") + cleansess := flag.Bool("clean", false, "Set Clean Session (default false)") + qos := flag.Int("qos", 0, "The Quality of Service 0,1,2 (default 0)") + num := flag.Int("num", 1, "The number of messages to publish or subscribe (default 1)") + payload := flag.String("message", "", "The message text to publish (default empty)") + action := flag.String("action", "", "Action publish or subscribe (required)") + store := flag.String("store", ":memory:", "The Store Directory (default use memory store)") + flag.Parse() + + if *action != "pub" && *action != "sub" { + fmt.Println("Invalid setting for -action, must be pub or sub") + return + } + + if *topic == "" { + fmt.Println("Invalid setting for -topic, must not be empty") + return + } + + fmt.Printf("Sample Info:\n") + fmt.Printf("\taction: %s\n", *action) + fmt.Printf("\tbroker: %s\n", *broker) + fmt.Printf("\tclientid: %s\n", *id) + fmt.Printf("\tuser: %s\n", *user) + fmt.Printf("\tpassword: %s\n", *password) + fmt.Printf("\ttopic: %s\n", *topic) + fmt.Printf("\tmessage: %s\n", *payload) + fmt.Printf("\tqos: %d\n", *qos) + fmt.Printf("\tcleansess: %v\n", *cleansess) + fmt.Printf("\tnum: %d\n", *num) + fmt.Printf("\tstore: %s\n", *store) + + opts := MQTT.NewClientOptions() + opts.AddBroker(*broker) + opts.SetClientID(*id) + opts.SetUsername(*user) + opts.SetPassword(*password) + opts.SetCleanSession(*cleansess) + if *store != ":memory:" { + opts.SetStore(MQTT.NewFileStore(*store)) + } + + if *action == "pub" { + client := MQTT.NewClient(opts) + if token := client.Connect(); token.Wait() && token.Error() != nil { + panic(token.Error()) + } + fmt.Println("Sample Publisher Started") + for i := 0; i < *num; i++ { + fmt.Println("---- doing publish ----") + token := client.Publish(*topic, byte(*qos), false, *payload) + token.Wait() + } + + client.Disconnect(250) + fmt.Println("Sample Publisher Disconnected") + } else { + receiveCount := 0 + choke := make(chan [2]string) + + opts.SetDefaultPublishHandler(func(client *MQTT.Client, msg MQTT.Message) { + choke <- [2]string{msg.Topic(), string(msg.Payload())} + }) + + client := MQTT.NewClient(opts) + if token := client.Connect(); token.Wait() && token.Error() != nil { + panic(token.Error()) + } + + if token := client.Subscribe(*topic, byte(*qos), nil); token.Wait() && token.Error() != nil { + fmt.Println(token.Error()) + os.Exit(1) + } + + for receiveCount < *num { + incoming := <-choke + fmt.Printf("RECEIVED TOPIC: %s MESSAGE: %s\n", incoming[0], incoming[1]) + receiveCount++ + } + + client.Disconnect(250) + fmt.Println("Sample Subscriber Disconnected") + } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/simple.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/simple.go new file mode 100644 index 000000000..0caf2d5c0 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/simple.go @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package main + +import ( + "fmt" + "os" + "time" + + MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" +) + +var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) +} + +func main() { + opts := MQTT.NewClientOptions().AddBroker("tcp://iot.eclipse.org:1883").SetClientID("gotrivial") + opts.SetDefaultPublishHandler(f) + + c := MQTT.NewClient(opts) + if token := c.Connect(); token.Wait() && token.Error() != nil { + panic(token.Error()) + } + + if token := c.Subscribe("/go-mqtt/sample", 0, nil); token.Wait() && token.Error() != nil { + fmt.Println(token.Error()) + os.Exit(1) + } + + for i := 0; i < 5; i++ { + text := fmt.Sprintf("this is msg #%d!", i) + token := c.Publish("/go-mqtt/sample", 0, false, text) + token.Wait() + } + + time.Sleep(3 * time.Second) + + if token := c.Unsubscribe("/go-mqtt/sample"); token.Wait() && token.Error() != nil { + fmt.Println(token.Error()) + os.Exit(1) + } + + c.Disconnect(250) +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/ssl.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/ssl.go new file mode 100644 index 000000000..c4efc27f2 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/ssl.go @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +/* +To run this sample, the following certificates +must be created: + + rootCA-crt.pem - root certificate authority that is used + to sign and verify the client and server + certificates. + rootCA-key.pem - keyfile for the rootCA. + + server-crt.pem - server certificate signed by the CA. + server-key.pem - keyfile for the server certificate. + + client-crt.pem - client certificate signed by the CA. + client-key.pem - keyfile for the client certificate. + + CAfile.pem - file containing concatenated CA certificates + if there is more than 1 in the chain. + (e.g. root CA -> intermediate CA -> server cert) + + Instead of creating CAfile.pem, rootCA-crt.pem can be added + to the default openssl CA certificate bundle. To find the + default CA bundle used, check: + $GO_ROOT/src/pkg/crypto/x509/root_unix.go + To use this CA bundle, just set tls.Config.RootCAs = nil. +*/ + +package main + +import "io/ioutil" import "fmt" import "time" import "crypto/tls" import "crypto/x509" import MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" + +func NewTLSConfig() *tls.Config { + // Import trusted certificates from CAfile.pem. + // Alternatively, manually add CA certificates to + // default openssl CA bundle.
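+ // (As the header comment above notes, setting tls.Config.RootCAs to + // nil instead would fall back to the default system CA bundle.)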
+ certpool := x509.NewCertPool() + pemCerts, err := ioutil.ReadFile("samplecerts/CAfile.pem") + if err == nil { + certpool.AppendCertsFromPEM(pemCerts) + } + + // Import client certificate/key pair + cert, err := tls.LoadX509KeyPair("samplecerts/client-crt.pem", "samplecerts/client-key.pem") + if err != nil { + panic(err) + } + + // Just to print out the client certificate. + cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + panic(err) + } + fmt.Println(cert.Leaf) + + // Create tls.Config with desired tls properties + return &tls.Config{ + // RootCAs = certs used to verify server cert. + RootCAs: certpool, + // ClientAuth = policy for requesting a cert from the client. + // This is a server-side setting and has no effect in a + // client configuration. + ClientAuth: tls.NoClientCert, + // ClientCAs = certs used to validate client cert. + ClientCAs: nil, + // InsecureSkipVerify = skip verifying that the cert contents + // match the server (hostname/IP checks are not performed). + InsecureSkipVerify: true, + // Certificates = list of certs client sends to server. + Certificates: []tls.Certificate{cert}, + } +} + +var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) { + fmt.Printf("TOPIC: %s\n", msg.Topic()) + fmt.Printf("MSG: %s\n", msg.Payload()) +} + +func main() { + tlsconfig := NewTLSConfig() + + opts := MQTT.NewClientOptions() + opts.AddBroker("ssl://iot.eclipse.org:8883") + opts.SetClientID("ssl-sample").SetTLSConfig(tlsconfig) + opts.SetDefaultPublishHandler(f) + + // Start the connection + c := MQTT.NewClient(opts) + if token := c.Connect(); token.Wait() && token.Error() != nil { + panic(token.Error()) + } + + c.Subscribe("/go-mqtt/sample", 0, nil) + + i := 0 + for _ = range time.Tick(time.Duration(1) * time.Second) { + if i == 5 { + break + } + text := fmt.Sprintf("this is msg #%d!", i) + c.Publish("/go-mqtt/sample", 0, false, text) + i++ + } + + c.Disconnect(250) +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/stdinpub.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/stdinpub.go new file mode 100644 index 000000000..d5604d2a3 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/stdinpub.go @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved.
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package main + +import ( + "bufio" + "crypto/tls" + "flag" + "fmt" + "io" + //"log" + "os" + "strconv" + "time" +) + +import MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" + +func main() { + //MQTT.DEBUG = log.New(os.Stdout, "", 0) + //MQTT.ERROR = log.New(os.Stdout, "", 0) + stdin := bufio.NewReader(os.Stdin) + hostname, _ := os.Hostname() + + server := flag.String("server", "tcp://127.0.0.1:1883", "The full URL of the MQTT server to connect to") + topic := flag.String("topic", hostname, "Topic to publish the messages on") + qos := flag.Int("qos", 0, "The QoS to send the messages at") + retained := flag.Bool("retained", false, "Are the messages sent with the retained flag") + clientid := flag.String("clientid", hostname+strconv.Itoa(time.Now().Second()), "A clientid for the connection") + username := flag.String("username", "", "A username to authenticate to the MQTT server") + password := flag.String("password", "", "Password to match username") + flag.Parse() + + connOpts := MQTT.NewClientOptions().AddBroker(*server).SetClientID(*clientid).SetCleanSession(true) + if *username != "" { + connOpts.SetUsername(*username) + if *password != "" { + connOpts.SetPassword(*password) + } + } + tlsConfig := &tls.Config{InsecureSkipVerify: true, ClientAuth: tls.NoClientCert} + connOpts.SetTLSConfig(tlsConfig) + + client := MQTT.NewClient(connOpts) + if token := client.Connect(); token.Wait() && token.Error() != nil { + fmt.Println(token.Error()) + return + } + fmt.Printf("Connected to %s\n", *server) + + for { + message, err := stdin.ReadString('\n') + if err == io.EOF { + os.Exit(0) + } + client.Publish(*topic, byte(*qos), *retained, message) + } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/stdoutsub.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/stdoutsub.go new file mode 100644 index 000000000..a6b058de1 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/samples/stdoutsub.go @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package main + +import ( + "crypto/tls" + "flag" + "fmt" + //"log" + "os" + "os/signal" + "strconv" + "syscall" + "time" + + MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git" +) + +func onMessageReceived(client *MQTT.Client, message MQTT.Message) { + fmt.Printf("Received message on topic: %s\nMessage: %s\n", message.Topic(), message.Payload()) +} + +var i int64 + +func main() { + //MQTT.DEBUG = log.New(os.Stdout, "", 0) + //MQTT.ERROR = log.New(os.Stdout, "", 0) + c := make(chan os.Signal, 1) + i = 0 + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + fmt.Println("signal received, exiting") + os.Exit(0) + }() + + hostname, _ := os.Hostname() + + server := flag.String("server", "tcp://127.0.0.1:1883", "The full url of the MQTT server to connect to ex: tcp://127.0.0.1:1883") + topic := flag.String("topic", "#", "Topic to subscribe to") + qos := flag.Int("qos", 0, "The QoS to subscribe to messages at") + clientid := flag.String("clientid", hostname+strconv.Itoa(time.Now().Second()), "A clientid for the connection") + username := flag.String("username", "", "A username to authenticate to the MQTT server") + password := flag.String("password", "", "Password to match username") + flag.Parse() + + connOpts := &MQTT.ClientOptions{ + ClientID: *clientid, + CleanSession: true, + Username: *username, + Password: *password, + MaxReconnectInterval: 1 * time.Second, + KeepAlive: 30 * time.Second, + TLSConfig: tls.Config{InsecureSkipVerify: true, ClientAuth: tls.NoClientCert}, + } + connOpts.AddBroker(*server) + connOpts.OnConnect = func(c *MQTT.Client) { + if token := c.Subscribe(*topic, byte(*qos), onMessageReceived); token.Wait() && token.Error() != nil { + panic(token.Error()) + } + } + + client := MQTT.NewClient(connOpts) + if token := client.Connect(); token.Wait() && token.Error() != nil { + panic(token.Error()) + } else { + fmt.Printf("Connected to %s\n", *server) + } + + for { + time.Sleep(1 * time.Second) + } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/store.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/store.go new file mode 100644 index 000000000..4a2ef86eb --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/store.go @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "fmt" + "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" + "strconv" +) + +const ( + inboundPrefix = "i." + outboundPrefix = "o." +) + +// Store is an interface which can be used to provide implementations +// for message persistence. +// Because we may have to store distinct messages with the same +// message ID, we need a unique key for each message. This is +// possible by prepending "i." or "o." 
to each message id +type Store interface { + Open() + Put(string, packets.ControlPacket) + Get(string) packets.ControlPacket + All() []string + Del(string) + Close() + Reset() +} + +// A key MUST have the form "X.[messageid]" +// where X is 'i' or 'o' +func mIDFromKey(key string) uint16 { + s := key[2:] + i, err := strconv.Atoi(s) + chkerr(err) + return uint16(i) +} + +// Return a string of the form "i.[id]" +func inboundKeyFromMID(id uint16) string { + return fmt.Sprintf("%s%d", inboundPrefix, id) +} + +// Return a string of the form "o.[id]" +func outboundKeyFromMID(id uint16) string { + return fmt.Sprintf("%s%d", outboundPrefix, id) +} + +// govern which outgoing messages are persisted +func persistOutbound(s Store, m packets.ControlPacket) { + switch m.Details().Qos { + case 0: + switch m.(type) { + case *packets.PubackPacket, *packets.PubcompPacket: + // Sending puback. delete matching publish + // from ibound + s.Del(inboundKeyFromMID(m.Details().MessageID)) + } + case 1: + switch m.(type) { + case *packets.PublishPacket, *packets.PubrelPacket, *packets.SubscribePacket, *packets.UnsubscribePacket: + // Sending publish. store in obound + // until puback received + s.Put(outboundKeyFromMID(m.Details().MessageID), m) + default: + chkcond(false) + } + case 2: + switch m.(type) { + case *packets.PublishPacket: + // Sending publish. store in obound + // until pubrel received + s.Put(outboundKeyFromMID(m.Details().MessageID), m) + default: + chkcond(false) + } + } +} + +// govern which incoming messages are persisted +func persistInbound(s Store, m packets.ControlPacket) { + switch m.Details().Qos { + case 0: + switch m.(type) { + case *packets.PubackPacket, *packets.SubackPacket, *packets.UnsubackPacket, *packets.PubcompPacket: + // Received a puback. delete matching publish + // from obound + s.Del(outboundKeyFromMID(m.Details().MessageID)) + case *packets.PublishPacket, *packets.PubrecPacket, *packets.PingrespPacket, *packets.ConnackPacket: + default: + chkcond(false) + } + case 1: + switch m.(type) { + case *packets.PublishPacket, *packets.PubrelPacket: + // Received a publish. store it in ibound + // until puback sent + s.Put(inboundKeyFromMID(m.Details().MessageID), m) + default: + chkcond(false) + } + case 2: + switch m.(type) { + case *packets.PublishPacket: + // Received a publish. store it in ibound + // until pubrel received + s.Put(inboundKeyFromMID(m.Details().MessageID), m) + default: + chkcond(false) + } + } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/token.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/token.go new file mode 100644 index 000000000..7644353b1 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/token.go @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2014 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Allan Stockdill-Mander + */ + +package mqtt + +import ( + "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" + "sync" + "time" +) + +//PacketAndToken is a struct that contains both a ControlPacket and a +//Token. This struct is passed via channels between the client interface +//code and the underlying code responsible for sending and receiving +//MQTT messages. 
+type PacketAndToken struct { + p packets.ControlPacket + t Token +} + +//Token defines the interface for the tokens used to indicate when +//actions have completed. +type Token interface { + Wait() bool + WaitTimeout(time.Duration) bool + flowComplete() + Error() error +} + +type baseToken struct { + m sync.RWMutex + complete chan struct{} + ready bool + err error +} + +// Wait will wait indefinitely for the Token to complete, i.e. for the Publish +// to be sent and its receipt confirmed by the broker +func (b *baseToken) Wait() bool { + b.m.Lock() + defer b.m.Unlock() + if !b.ready { + <-b.complete + b.ready = true + } + return b.ready +} + +// WaitTimeout takes a maximum time to wait for the flow associated with the +// Token to complete; it returns true if the flow completed before the timeout +// and false if the timeout occurred. In the case of a timeout the Token +// does not have an error set, in case the caller wishes to wait again +func (b *baseToken) WaitTimeout(d time.Duration) bool { + b.m.Lock() + defer b.m.Unlock() + if !b.ready { + select { + case <-b.complete: + b.ready = true + case <-time.After(d): + } + } + return b.ready +} + +func (b *baseToken) flowComplete() { + close(b.complete) +} + +func (b *baseToken) Error() error { + b.m.RLock() + defer b.m.RUnlock() + return b.err +} + +func newToken(tType byte) Token { + switch tType { + case packets.Connect: + return &ConnectToken{baseToken: baseToken{complete: make(chan struct{})}} + case packets.Subscribe: + return &SubscribeToken{baseToken: baseToken{complete: make(chan struct{})}, subResult: make(map[string]byte)} + case packets.Publish: + return &PublishToken{baseToken: baseToken{complete: make(chan struct{})}} + case packets.Unsubscribe: + return &UnsubscribeToken{baseToken: baseToken{complete: make(chan struct{})}} + case packets.Disconnect: + return &DisconnectToken{baseToken: baseToken{complete: make(chan struct{})}} + } + return nil +} + +//ConnectToken is an extension of Token containing the extra fields +//required to provide information about calls to Connect() +type ConnectToken struct { + baseToken + returnCode byte +} + +//ReturnCode returns the acknowledgement code in the connack sent +//in response to a Connect() +func (c *ConnectToken) ReturnCode() byte { + c.m.RLock() + defer c.m.RUnlock() + return c.returnCode +} + +//PublishToken is an extension of Token containing the extra fields +//required to provide information about calls to Publish() +type PublishToken struct { + baseToken + messageID uint16 +} + +//MessageID returns the MQTT message ID that was assigned to the +//Publish packet when it was sent to the broker +func (p *PublishToken) MessageID() uint16 { + return p.messageID +} + +//SubscribeToken is an extension of Token containing the extra fields +//required to provide information about calls to Subscribe() +type SubscribeToken struct { + baseToken + subs []string + subResult map[string]byte +} + +//Result returns a map of topics that were subscribed to along with +//the matching return code from the broker. This is either the Qos +//value of the subscription or an error code.
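+//Note that the result map is only guaranteed to be complete once Wait() +//(or a successful WaitTimeout()) has returned for this token; reading it +//earlier may observe an empty or partially filled map.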
+func (s *SubscribeToken) Result() map[string]byte { + s.m.RLock() + defer s.m.RUnlock() + return s.subResult +} + +//UnsubscribeToken is an extension of Token containing the extra fields +//required to provide information about calls to Unsubscribe() +type UnsubscribeToken struct { + baseToken +} + +//DisconnectToken is an extension of Token containing the extra fields +//required to provide information about calls to Disconnect() +type DisconnectToken struct { + baseToken +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/topic.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/topic.go new file mode 100644 index 000000000..ffe796d28 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/topic.go @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2014 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "errors" + "strings" +) + +//ErrInvalidQos is the error returned when a packet is to be sent +//with an invalid QoS value +var ErrInvalidQos = errors.New("Invalid QoS") + +//ErrInvalidTopicEmptyString is the error returned when a topic string +//is passed in that has zero length +var ErrInvalidTopicEmptyString = errors.New("Invalid Topic; empty string") + +//ErrInvalidTopicMultilevel is the error returned when a topic string +//is passed in that has the multi-level wildcard in any position but +//the last +var ErrInvalidTopicMultilevel = errors.New("Invalid Topic; multi-level wildcard must be last level") + +// Topic Names and Topic Filters +// The MQTT v3.1.1 spec clarifies a number of ambiguities with regard +// to the validity of Topic strings. +// - A Topic must be between 1 and 65535 bytes. +// - A Topic is case sensitive. +// - A Topic may contain whitespace. +// - A Topic containing a leading forward slash is different from a Topic without one. +// - A Topic may be "/" (two levels, both empty string). +// - A Topic must be UTF-8 encoded. +// - A Topic may contain any number of levels. +// - A Topic may contain an empty level (two forward slashes in a row). +// - A TopicName may not contain a wildcard. +// - A TopicFilter may only have a # (multi-level) wildcard as the last level. +// - A TopicFilter may contain any number of + (single-level) wildcards. +// - A TopicFilter with a # will match the absence of a level +// Example: a subscription to "foo/#" will match messages published to "foo".
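+// As a concrete illustration of these rules (using the validation helper +// defined below): validateTopicAndQos("a/+/b", 1) returns nil; +// validateTopicAndQos("a/#/b", 0) returns ErrInvalidTopicMultilevel, since +// "#" is only permitted as the final level; and validateTopicAndQos("", 0) +// returns ErrInvalidTopicEmptyString.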
+ +func validateSubscribeMap(subs map[string]byte) ([]string, []byte, error) { + var topics []string + var qoss []byte + for topic, qos := range subs { + if err := validateTopicAndQos(topic, qos); err != nil { + return nil, nil, err + } + topics = append(topics, topic) + qoss = append(qoss, qos) + } + + return topics, qoss, nil +} + +func validateTopicAndQos(topic string, qos byte) error { + if len(topic) == 0 { + return ErrInvalidTopicEmptyString + } + + levels := strings.Split(topic, "/") + for i, level := range levels { + if level == "#" && i != len(levels)-1 { + return ErrInvalidTopicMultilevel + } + } + + if qos > 2 { + return ErrInvalidQos + } + return nil +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/trace.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/trace.go new file mode 100644 index 000000000..2f5a01466 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/trace.go @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "io/ioutil" + "log" +) + +// Internal levels of library output that are initialised to not print +// anything but can be overridden by the programmer +var ( + ERROR *log.Logger + CRITICAL *log.Logger + WARN *log.Logger + DEBUG *log.Logger +) + +func init() { + ERROR = log.New(ioutil.Discard, "", 0) + CRITICAL = log.New(ioutil.Discard, "", 0) + WARN = log.New(ioutil.Discard, "", 0) + DEBUG = log.New(ioutil.Discard, "", 0) +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_client_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_client_test.go new file mode 100644 index 000000000..59f7a4500 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_client_test.go @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved.
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "log" + "net/http" + "os" + "testing" + + _ "net/http/pprof" +) + +func init() { + DEBUG = log.New(os.Stderr, "DEBUG ", log.Ltime) + WARN = log.New(os.Stderr, "WARNING ", log.Ltime) + CRITICAL = log.New(os.Stderr, "CRITICAL ", log.Ltime) + ERROR = log.New(os.Stderr, "ERROR ", log.Ltime) + + go func() { + log.Println(http.ListenAndServe("localhost:6060", nil)) + }() +} + +func Test_NewClient_simple(t *testing.T) { + ops := NewClientOptions().SetClientID("foo").AddBroker("tcp://10.10.0.1:1883") + c := NewClient(ops) + + if c == nil { + t.Fatalf("client is nil") + } + + if c.options.ClientID != "foo" { + t.Fatalf("bad client id") + } + + if c.options.Servers[0].Scheme != "tcp" { + t.Fatalf("bad server scheme") + } + + if c.options.Servers[0].Host != "10.10.0.1:1883" { + t.Fatalf("bad server host") + } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_messageids_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_messageids_test.go new file mode 100644 index 000000000..9d941f697 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_messageids_test.go @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "fmt" + "testing" + "time" +) + +type DummyToken struct{} + +func (d *DummyToken) Wait() bool { + return true +} + +func (d *DummyToken) WaitTimeout(t time.Duration) bool { + return true +} + +func (d *DummyToken) flowComplete() {} + +func (d *DummyToken) Error() error { + return nil +} + +func Test_getID(t *testing.T) { + mids := &messageIds{index: make(map[uint16]Token)} + + i1 := mids.getID(&DummyToken{}) + + if i1 != 1 { + t.Fatalf("i1 was wrong: %v", i1) + } + + i2 := mids.getID(&DummyToken{}) + + if i2 != 2 { + t.Fatalf("i2 was wrong: %v", i2) + } + + for i := uint16(3); i < 100; i++ { + id := mids.getID(&DummyToken{}) + if id != i { + t.Fatalf("id was wrong expected %v got %v", i, id) + } + } +} + +func Test_freeID(t *testing.T) { + mids := &messageIds{index: make(map[uint16]Token)} + + i1 := mids.getID(&DummyToken{}) + mids.freeID(i1) + + if i1 != 1 { + t.Fatalf("i1 was wrong: %v", i1) + } + + i2 := mids.getID(&DummyToken{}) + fmt.Printf("i2: %v\n", i2) +} + +func Test_messageids_mix(t *testing.T) { + mids := &messageIds{index: make(map[uint16]Token)} + + done := make(chan bool) + a := make(chan uint16, 3) + b := make(chan uint16, 20) + c := make(chan uint16, 100) + + go func() { + for i := 0; i < 10000; i++ { + a <- mids.getID(&DummyToken{}) + mids.freeID(<-b) + } + done <- true + }() + + go func() { + for i := 0; i < 10000; i++ { + b <- mids.getID(&DummyToken{}) + mids.freeID(<-c) + } + done <- true + }() + + go func() { + for i := 0; i < 10000; i++ { + c <- mids.getID(&DummyToken{}) + mids.freeID(<-a) + } + done <- true + }() +
+ <-done + <-done + <-done +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_options_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_options_test.go new file mode 100644 index 000000000..d4181442e --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_options_test.go @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "crypto/tls" + "crypto/x509" + "testing" + "time" +) + +func Test_NewClientOptions_default(t *testing.T) { + o := NewClientOptions() + + if o.ClientID != "" { + t.Fatalf("bad default client id") + } + + if o.Username != "" { + t.Fatalf("bad default username") + } + + if o.Password != "" { + t.Fatalf("bad default password") + } + + if o.KeepAlive != 30*time.Second { + t.Fatalf("bad default timeout") + } +} + +func Test_NewClientOptions_mix(t *testing.T) { + o := NewClientOptions() + o.AddBroker("tcp://192.168.1.2:9999") + o.SetClientID("myclientid") + o.SetUsername("myuser") + o.SetPassword("mypassword") + o.SetKeepAlive(88) + + if o.Servers[0].Scheme != "tcp" { + t.Fatalf("bad scheme") + } + + if o.Servers[0].Host != "192.168.1.2:9999" { + t.Fatalf("bad host") + } + + if o.ClientID != "myclientid" { + t.Fatalf("bad set clientid") + } + + if o.Username != "myuser" { + t.Fatalf("bad set username") + } + + if o.Password != "mypassword" { + t.Fatalf("bad set password") + } + + if o.KeepAlive != 88 { + t.Fatalf("bad set timeout") + } +} + +func Test_ModifyOptions(t *testing.T) { + o := NewClientOptions() + o.AddBroker("tcp://3.3.3.3:12345") + c := NewClient(o) + o.AddBroker("ws://2.2.2.2:9999") + o.SetOrderMatters(false) + + if c.options.Servers[0].Scheme != "tcp" { + t.Fatalf("client options.server.Scheme was modified") + } + + // if c.options.server.Host != "2.2.2.2:9999" { + // t.Fatalf("client options.server.Host was modified") + // } + + if o.Order != false { + t.Fatalf("options.order was not modified") + } +} + +func Test_TLSConfig(t *testing.T) { + o := NewClientOptions().SetTLSConfig(&tls.Config{ + RootCAs: x509.NewCertPool(), + ClientAuth: tls.NoClientCert, + ClientCAs: x509.NewCertPool(), + InsecureSkipVerify: true}) + + c := NewClient(o) + + if c.options.TLSConfig.ClientAuth != tls.NoClientCert { + t.Fatalf("client options.tlsConfig ClientAuth incorrect") + } + + if c.options.TLSConfig.InsecureSkipVerify != true { + t.Fatalf("client options.tlsConfig InsecureSkipVerify incorrect") + } +} + +func Test_OnConnectionLost(t *testing.T) { + onconnlost := func(client *Client, err error) { + panic(err) + } + o := NewClientOptions().SetConnectionLostHandler(onconnlost) + + c := NewClient(o) + + if c.options.OnConnectionLost == nil { + t.Fatalf("client options.onconnlost was nil") + } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_ping_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_ping_test.go new file mode 100644 index 000000000..2ac8831da --- /dev/null +++ 
b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_ping_test.go @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "bytes" + "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" + "testing" +) + +func Test_NewPingReqMessage(t *testing.T) { + pr := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket) + if pr.MessageType != packets.Pingreq { + t.Errorf("NewPingReqMessage bad msg type: %v", pr.MessageType) + } + if pr.RemainingLength != 0 { + t.Errorf("NewPingReqMessage bad remlen, expected 0, got %d", pr.RemainingLength) + } + + exp := []byte{ + 0xC0, + 0x00, + } + + var buf bytes.Buffer + pr.Write(&buf) + bs := buf.Bytes() + + if len(bs) != 2 { + t.Errorf("NewPingReqMessage.Bytes() wrong length: %d", len(bs)) + } + + if exp[0] != bs[0] || exp[1] != bs[1] { + t.Errorf("NewPingMessage.Bytes() wrong") + } +} + +func Test_DecodeMessage_pingresp(t *testing.T) { + bs := bytes.NewBuffer([]byte{ + 0xD0, + 0x00, + }) + presp, _ := packets.ReadPacket(bs) + if presp.(*packets.PingrespPacket).MessageType != packets.Pingresp { + t.Errorf("DecodeMessage ping response wrong msg type: %v", presp.(*packets.PingrespPacket).MessageType) + } + if presp.(*packets.PingrespPacket).RemainingLength != 0 { + t.Errorf("DecodeMessage ping response wrong rem len: %d", presp.(*packets.PingrespPacket).RemainingLength) + } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_router_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_router_test.go new file mode 100644 index 000000000..48e6e392b --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_router_test.go @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" + "testing" +) + +func Test_newRouter(t *testing.T) { + router, stop := newRouter() + if router == nil { + t.Fatalf("router is nil") + } + if stop == nil { + t.Fatalf("stop is nil") + } + if router.routes.Len() != 0 { + t.Fatalf("router.routes was not empty") + } +} + +func Test_AddRoute(t *testing.T) { + router, _ := newRouter() + calledback := false + cb := func(client *Client, msg Message) { + calledback = true + } + router.addRoute("/alpha", cb) + + if router.routes.Len() != 1 { + t.Fatalf("router.routes was wrong") + } +} + +func Test_Match(t *testing.T) { + router, _ := newRouter() + router.addRoute("/alpha", nil) + + if !router.routes.Front().Value.(*route).match("/alpha") { + t.Fatalf("match function is bad") + } + + if router.routes.Front().Value.(*route).match("alpha") { + t.Fatalf("match function is bad") + } +} + +func Test_match(t *testing.T) { + + check := func(route, topic string, exp bool) { + result := routeIncludesTopic(route, topic) + if exp != result { + t.Errorf("match was bad R: %v, T: %v, EXP: %v", route, topic, exp) + } + } + + // ** Basic ** + R := "" + T := "" + check(R, T, true) + + R = "x" + T = "" + check(R, T, false) + + R = "" + T = "x" + check(R, T, false) + + R = "x" + T = "x" + check(R, T, true) + + R = "x" + T = "X" + check(R, T, false) + + R = "alpha" + T = "alpha" + check(R, T, true) + + R = "alpha" + T = "beta" + check(R, T, false) + + // ** / ** + R = "/" + T = "/" + check(R, T, true) + + R = "/one" + T = "/one" + check(R, T, true) + + R = "/" + T = "/two" + check(R, T, false) + + R = "/two" + T = "/" + check(R, T, false) + + R = "/two" + T = "two" + check(R, T, false) // a leading "/" creates a different topic + + R = "/a/" + T = "/a" + check(R, T, false) + + R = "/a/" + T = "/a/b" + check(R, T, false) + + R = "/a/b" + T = "/a/b" + check(R, T, true) + + R = "/a/b/" + T = "/a/b" + check(R, T, false) + + R = "/a/b" + T = "/R/b" + check(R, T, false) + + // ** + ** + R = "/a/+/c" + T = "/a/b/c" + check(R, T, true) + + R = "/+/b/c" + T = "/a/b/c" + check(R, T, true) + + R = "/a/b/+" + T = "/a/b/c" + check(R, T, true) + + R = "/a/+/+" + T = "/a/b/c" + check(R, T, true) + + R = "/+/+/+" + T = "/a/b/c" + check(R, T, true) + + R = "/+/+/c" + T = "/a/b/c" + check(R, T, true) + + R = "/a/b/c/+" // different number of levels + T = "/a/b/c" + check(R, T, false) + + R = "+" + T = "a" + check(R, T, true) + + R = "/+" + T = "a" + check(R, T, false) + + R = "+/+" + T = "/a" + check(R, T, true) + + R = "+/+" + T = "a" + check(R, T, false) + + // ** # ** + R = "#" + T = "/a/b/c" + check(R, T, true) + + R = "/#" + T = "/a/b/c" + check(R, T, true) + + // R = "/#/" // not valid + // T = "/a/b/c" + // check(R, T, true) + + R = "/#" + T = "/a/b/c" + check(R, T, true) + + R = "/a/#" + T = "/a/b/c" + check(R, T, true) + + R = "/a/#" + T = "/a/b/c" + check(R, T, true) + + R = "/a/b/#" + T = "/a/b/c" + check(R, T, true) + + // ** unicode ** + R = "☃" + T = "☃" + check(R, T, true) + + R = "✈" + T = "☃" + check(R, T, false) + + R = "/☃/✈" + T = "/☃/ッ" + check(R, T, false) + + R = "#" + T = "/☃/ッ" + check(R, T, true) + + R = "/☃/+" + T = "/☃/ッ/♫/ø/☹☹☹" + check(R, T, false) 
+ + R = "/☃/#" + T = "/☃/ッ/♫/ø/☹☹☹" + check(R, T, true) + + R = "/☃/ッ/♫/ø/+" + T = "/☃/ッ/♫/ø/☹☹☹" + check(R, T, true) + + R = "/☃/ッ/+/ø/☹☹☹" + T = "/☃/ッ/♫/ø/☹☹☹" + check(R, T, true) + + R = "/+/a/ッ/+/ø/☹☹☹" + T = "/b/♫/ッ/♫/ø/☹☹☹" + check(R, T, false) + + R = "/+/♫/ッ/+/ø/☹☹☹" + T = "/b/♫/ッ/♫/ø/☹☹☹" + check(R, T, true) +} + +func Test_MatchAndDispatch(t *testing.T) { + calledback := make(chan bool) + + cb := func(c *Client, m Message) { + calledback <- true + } + + pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pub.Qos = 2 + pub.TopicName = "a" + pub.Payload = []byte("foo") + + msgs := make(chan *packets.PublishPacket) + + router, stopper := newRouter() + router.addRoute("a", cb) + + router.matchAndDispatch(msgs, true, nil) + + msgs <- pub + + <-calledback + + stopper <- true + + select { + case msgs <- pub: + t.Errorf("msgs should not have a listener") + default: + } + +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_store_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_store_test.go new file mode 100644 index 000000000..42e7d7ceb --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_store_test.go @@ -0,0 +1,668 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "bufio" + "fmt" + "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets" + "io/ioutil" + "os" + "testing" +) + +func Test_fullpath(t *testing.T) { + p := fullpath("/tmp/store", "o.44324") + e := "/tmp/store/o.44324.msg" + if p != e { + t.Fatalf("full path expected %s, got %s", e, p) + } +} + +func Test_exists(t *testing.T) { + b := exists("/") + if !b { + t.Errorf("/proc/cpuinfo was not found") + } +} + +func Test_exists_no(t *testing.T) { + b := exists("/this/path/is/not/real/i/hope") + if b { + t.Errorf("you have some strange files") + } +} + +func isemptydir(dir string) bool { + chkcond(exists(dir)) + files, err := ioutil.ReadDir(dir) + chkerr(err) + return len(files) == 0 +} + +func Test_mIDFromKey(t *testing.T) { + key := "i.123" + exp := uint16(123) + res := mIDFromKey(key) + if exp != res { + t.Fatalf("mIDFromKey failed") + } +} + +func Test_inboundKeyFromMID(t *testing.T) { + id := uint16(9876) + exp := "i.9876" + res := inboundKeyFromMID(id) + if exp != res { + t.Fatalf("inboundKeyFromMID failed") + } +} + +func Test_outboundKeyFromMID(t *testing.T) { + id := uint16(7654) + exp := "o.7654" + res := outboundKeyFromMID(id) + if exp != res { + t.Fatalf("outboundKeyFromMID failed") + } +} + +/************************ + **** persistOutbound **** + ************************/ + +func Test_persistOutbound_connect(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket) + m.Qos = 0 + m.Username = "user" + m.Password = []byte("pass") + m.ClientIdentifier = "cid" + //m := newConnectMsg(false, false, QOS_ZERO, false, "", nil, "cid", "user", "pass", 10) + persistOutbound(ts, m) + + if len(ts.mput) != 0 { + t.Fatalf("persistOutbound put message it should not have") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistOutbound get 
message it should not have") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistOutbound del message it should not have") + } +} + +func Test_persistOutbound_publish_0(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + m.Qos = 0 + m.TopicName = "/popub0" + m.Payload = []byte{0xBB, 0x00} + m.MessageID = 40 + persistOutbound(ts, m) + + if len(ts.mput) != 0 { + t.Fatalf("persistOutbound put message it should not have") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistOutbound get message it should not have") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistOutbound del message it should not have") + } +} + +func Test_persistOutbound_publish_1(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + m.Qos = 1 + m.TopicName = "/popub1" + m.Payload = []byte{0xBB, 0x00} + m.MessageID = 41 + persistOutbound(ts, m) + + if len(ts.mput) != 1 || ts.mput[0] != 41 { + t.Fatalf("persistOutbound put message it should not have") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistOutbound get message it should not have") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistOutbound del message it should not have") + } +} + +func Test_persistOutbound_publish_2(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + m.Qos = 2 + m.TopicName = "/popub2" + m.Payload = []byte{0xBB, 0x00} + m.MessageID = 42 + persistOutbound(ts, m) + + if len(ts.mput) != 1 || ts.mput[0] != 42 { + t.Fatalf("persistOutbound put message it should not have") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistOutbound get message it should not have") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistOutbound del message it should not have") + } +} + +func Test_persistOutbound_puback(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket) + persistOutbound(ts, m) + + if len(ts.mput) != 0 { + t.Fatalf("persistOutbound put message it should not have") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistOutbound get message it should not have") + } + + if len(ts.mdel) != 1 { + t.Fatalf("persistOutbound del message it should not have") + } +} + +func Test_persistOutbound_pubrec(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket) + persistOutbound(ts, m) + + if len(ts.mput) != 0 { + t.Fatalf("persistOutbound put message it should not have") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistOutbound get message it should not have") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistOutbound del message it should not have") + } +} + +func Test_persistOutbound_pubrel(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket) + m.MessageID = 43 + + persistOutbound(ts, m) + + if len(ts.mput) != 1 || ts.mput[0] != 43 { + t.Fatalf("persistOutbound put message it should not have") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistOutbound get message it should not have") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistOutbound del message it should not have") + } +} + +func Test_persistOutbound_pubcomp(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket) + persistOutbound(ts, m) + + if len(ts.mput) != 0 { + t.Fatalf("persistOutbound put message it should not have") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistOutbound get message it should not have") + } + + if 
len(ts.mdel) != 1 { + t.Fatalf("persistOutbound del message it should not have") + } +} + +func Test_persistOutbound_subscribe(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket) + m.Topics = []string{"/posub"} + m.Qoss = []byte{1} + m.MessageID = 44 + persistOutbound(ts, m) + + if len(ts.mput) != 1 || ts.mput[0] != 44 { + t.Fatalf("persistOutbound put message it should not have") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistOutbound get message it should not have") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistOutbound del message it should not have") + } +} + +func Test_persistOutbound_unsubscribe(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket) + m.Topics = []string{"/posub"} + m.MessageID = 45 + persistOutbound(ts, m) + + if len(ts.mput) != 1 || ts.mput[0] != 45 { + t.Fatalf("persistOutbound put message it should not have") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistOutbound get message it should not have") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistOutbound del message it should not have") + } +} + +func Test_persistOutbound_pingreq(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Pingreq) + persistOutbound(ts, m) + + if len(ts.mput) != 0 { + t.Fatalf("persistOutbound put message it should not have") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistOutbound get message it should not have") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistOutbound del message it should not have") + } +} + +func Test_persistOutbound_disconnect(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Disconnect) + persistOutbound(ts, m) + + if len(ts.mput) != 0 { + t.Fatalf("persistOutbound put message it should not have") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistOutbound get message it should not have") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistOutbound del message it should not have") + } +} + +/************************ + **** persistInbound **** + ************************/ + +func Test_persistInbound_connack(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Connack) + persistInbound(ts, m) + + if len(ts.mput) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistInbound in bad state") + } +} + +func Test_persistInbound_publish_0(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + m.Qos = 0 + m.TopicName = "/pipub0" + m.Payload = []byte{0xCC, 0x01} + m.MessageID = 50 + persistInbound(ts, m) + + if len(ts.mput) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistInbound in bad state") + } +} + +func Test_persistInbound_publish_1(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + m.Qos = 1 + m.TopicName = "/pipub1" + m.Payload = []byte{0xCC, 0x02} + m.MessageID = 51 + persistInbound(ts, m) + + if len(ts.mput) != 1 || ts.mput[0] != 51 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistInbound in bad state") + } +} + +func Test_persistInbound_publish_2(t *testing.T) { + ts := &TestStore{} + m 
:= packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + m.Qos = 2 + m.TopicName = "/pipub2" + m.Payload = []byte{0xCC, 0x03} + m.MessageID = 52 + persistInbound(ts, m) + + if len(ts.mput) != 1 || ts.mput[0] != 52 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistInbound in bad state") + } +} + +func Test_persistInbound_puback(t *testing.T) { + ts := &TestStore{} + pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pub.Qos = 1 + pub.TopicName = "/pub1" + pub.Payload = []byte{0xCC, 0x04} + pub.MessageID = 53 + publishKey := inboundKeyFromMID(pub.MessageID) + ts.Put(publishKey, pub) + + m := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket) + m.MessageID = 53 + + persistInbound(ts, m) // "deletes" packets.Publish from store + + if len(ts.mput) != 1 { // not actually deleted in TestStore + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mdel) != 1 || ts.mdel[0] != 53 { + t.Fatalf("persistInbound in bad state") + } +} + +func Test_persistInbound_pubrec(t *testing.T) { + ts := &TestStore{} + pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pub.Qos = 2 + pub.TopicName = "/pub2" + pub.Payload = []byte{0xCC, 0x05} + pub.MessageID = 54 + publishKey := inboundKeyFromMID(pub.MessageID) + ts.Put(publishKey, pub) + + m := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket) + m.MessageID = 54 + + persistInbound(ts, m) + + if len(ts.mput) != 1 || ts.mput[0] != 54 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistInbound in bad state") + } +} + +func Test_persistInbound_pubrel(t *testing.T) { + ts := &TestStore{} + pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket) + pub.Qos = 2 + pub.TopicName = "/pub2" + pub.Payload = []byte{0xCC, 0x06} + pub.MessageID = 55 + publishKey := inboundKeyFromMID(pub.MessageID) + ts.Put(publishKey, pub) + + m := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket) + m.MessageID = 55 + + persistInbound(ts, m) // will overwrite publish + + if len(ts.mput) != 2 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistInbound in bad state") + } +} + +func Test_persistInbound_pubcomp(t *testing.T) { + ts := &TestStore{} + + m := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket) + m.MessageID = 56 + + persistInbound(ts, m) + + if len(ts.mput) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mdel) != 1 || ts.mdel[0] != 56 { + t.Fatalf("persistInbound in bad state") + } +} + +func Test_persistInbound_suback(t *testing.T) { + ts := &TestStore{} + + m := packets.NewControlPacket(packets.Suback).(*packets.SubackPacket) + m.MessageID = 57 + + persistInbound(ts, m) + + if len(ts.mput) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mdel) != 1 || ts.mdel[0] != 57 { + t.Fatalf("persistInbound in bad state") + } +} + +func Test_persistInbound_unsuback(t *testing.T) { + ts := &TestStore{} + + m := 
packets.NewControlPacket(packets.Unsuback).(*packets.UnsubackPacket) + m.MessageID = 58 + + persistInbound(ts, m) + + if len(ts.mput) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mdel) != 1 || ts.mdel[0] != 58 { + t.Fatalf("persistInbound in bad state") + } +} + +func Test_persistInbound_pingresp(t *testing.T) { + ts := &TestStore{} + m := packets.NewControlPacket(packets.Pingresp) + + persistInbound(ts, m) + + if len(ts.mput) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mget) != 0 { + t.Fatalf("persistInbound in bad state") + } + + if len(ts.mdel) != 0 { + t.Fatalf("persistInbound in bad state") + } +} + +/*********** + * restore * + ***********/ + +func ensureRestoreDir() { + if exists("/tmp/restore") { + rerr := os.RemoveAll("/tmp/restore") + chkerr(rerr) + } + os.Mkdir("/tmp/restore", 0766) +} + +func writeToRestore(fname, content string) { + f, cerr := os.Create("/tmp/restore/" + fname) + chkerr(cerr) + chkcond(f != nil) + w := bufio.NewWriter(f) + w.Write([]byte(content)) + w.Flush() + f.Close() +} + +func verifyFromRestore(fname, content string, t *testing.T) { + msg, oerr := os.Open("/tmp/restore/" + fname) + chkerr(oerr) + all, rerr := ioutil.ReadAll(msg) + chkerr(rerr) + msg.Close() + s := string(all) + if s != content { + t.Fatalf("verify content expected `%s` but got `%s`", content, s) + } +} + +func Test_restore_1(t *testing.T) { + ensureRestoreDir() + + writeToRestore("i.1.bkp", "this is critical 1") + + restore("/tmp/restore") + + chkcond(!exists("/tmp/restore/i.1.bkp")) + chkcond(exists("/tmp/restore/i.1.msg")) + + verifyFromRestore("i.1.msg", "this is critical 1", t) +} + +func Test_restore_2(t *testing.T) { + ensureRestoreDir() + + writeToRestore("o.2.msg", "this is critical 2") + + restore("/tmp/restore") + + chkcond(!exists("/tmp/restore/o.2.bkp")) + chkcond(exists("/tmp/restore/o.2.msg")) + + verifyFromRestore("o.2.msg", "this is critical 2", t) +} + +func Test_restore_3(t *testing.T) { + ensureRestoreDir() + + N := 20 + // evens are .msg + // odds are .bkp + for i := 0; i < N; i++ { + content := fmt.Sprintf("foo %d bar", i) + if i%2 == 0 { + mname := fmt.Sprintf("i.%d.msg", i) + writeToRestore(mname, content) + } else { + mname := fmt.Sprintf("i.%d.bkp", i) + writeToRestore(mname, content) + } + } + + restore("/tmp/restore") + + for i := 0; i < N; i++ { + mname := fmt.Sprintf("i.%d.msg", i) + bname := fmt.Sprintf("i.%d.bkp", i) + content := fmt.Sprintf("foo %d bar", i) + chkcond(!exists("/tmp/restore/" + bname)) + chkcond(exists("/tmp/restore/" + mname)) + + verifyFromRestore(mname, content, t) + } +} diff --git a/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_topic_test.go b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_topic_test.go new file mode 100644 index 000000000..da2b240e8 --- /dev/null +++ b/Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/unit_topic_test.go @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2013 IBM Corp. + * + * All rights reserved. 
This program and the accompanying materials + * are made available under the terms of the Eclipse Public License v1.0 + * which accompanies this distribution, and is available at + * http://www.eclipse.org/legal/epl-v10.html + * + * Contributors: + * Seth Hoenig + * Allan Stockdill-Mander + * Mike Robertson + */ + +package mqtt + +import ( + "testing" +) + +func Test_ValidateTopicAndQos_qos3(t *testing.T) { + e := validateTopicAndQos("a", 3) + if e != ErrInvalidQos { + t.Fatalf("invalid error for invalid qos") + } +} + +func Test_ValidateTopicAndQos_ES(t *testing.T) { + e := validateTopicAndQos("", 0) + if e != ErrInvalidTopicEmptyString { + t.Fatalf("invalid error for empty topic name") + } +} + +func Test_ValidateTopicAndQos_a_0(t *testing.T) { + e := validateTopicAndQos("a", 0) + if e != nil { + t.Fatalf("error from valid NewTopicFilter") + } +} + +func Test_ValidateTopicAndQos_H(t *testing.T) { + e := validateTopicAndQos("a/#/c", 0) + if e != ErrInvalidTopicMultilevel { + t.Fatalf("invalid error for bad multilevel topic filter") + } +} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/.gitignore b/Godeps/_workspace/src/github.com/influxdb/influxdb/.gitignore deleted file mode 100644 index 147b88122..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/.gitignore +++ /dev/null @@ -1,67 +0,0 @@ -*~ -src/ - -config.json -/bin/ - -/pkg/ - -TAGS - -# vim temp files -*.swp - -*.test -/query/a.out* -.DS_Store - -# ignore generated files. -cmd/influxd/version.go - -# executables - -influxd -**/influxd -!**/influxd/ - -influx -**/influx -!**/influx/ - -influxdb -**/influxdb -!**/influxdb/ - -/benchmark-tool -/main -/benchmark-storage -godef -gosym -gocode -inspect-raft - -# dependencies -out_rpm/ -packages/ - -# autconf -autom4te.cache/ -config.log -config.status -Makefile - -# log file -influxdb.log -benchmark.log - -# config file -config.toml - -# test data files -integration/migration_data/ - -# goide project files -.idea - -# goconvey config files -*.goconvey diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/CHANGELOG.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/CHANGELOG.md deleted file mode 100644 index 89eb85a48..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/CHANGELOG.md +++ /dev/null @@ -1,1607 +0,0 @@ -## v0.9.3 [unreleased] - -### Release Notes - -There are breaking changes in this release. - - To store data points as integers you must now append i to the number if using the line protocol. - - If you have a UDP input configured, you should check the UDP section of [the new sample configuration file](https://github.com/influxdb/influxdb/blob/master/etc/config.sample.toml) to learn how to modify existing configuration files, as 0.9.3 now expects multiple UDP inputs. - - Configuration files must now have an entry for `wal-dir` in the `[data]` section. Check [new sample configuration file](https://github.com/influxdb/influxdb/blob/master/etc/config.sample.toml) for more details. - - The implicit `GROUP BY *` that was added to every `SELECT *` has been removed. Instead any tags in the data are now part of the columns in the returned query. - -Please see the *Features* section below for full details. - -### Features -- [#3376](https://github.com/influxdb/influxdb/pull/3376): Support for remote shard query mapping -- [#3372](https://github.com/influxdb/influxdb/pull/3372): Support joining nodes to existing cluster -- [#3426](https://github.com/influxdb/influxdb/pull/3426): Additional logging for continuous queries. 
Thanks @jhorwit2 -- [#3478](https://github.com/influxdb/influxdb/pull/3478): Support incremental cluster joins -- [#3519](https://github.com/influxdb/influxdb/pull/3519): **--BREAKING CHANGE--** Update line protocol to require trailing i for field values that are integers -- [#3529](https://github.com/influxdb/influxdb/pull/3529): Add TLS support for OpenTSDB plugin. Thanks @nathanielc -- [#3421](https://github.com/influxdb/influxdb/issues/3421): Should update metastore and cluster if IP or hostname changes -- [#3502](https://github.com/influxdb/influxdb/pull/3502): Importer for 0.8.9 data via the CLI -- [#3564](https://github.com/influxdb/influxdb/pull/3564): Fix alias, maintain column sort order -- [#3585](https://github.com/influxdb/influxdb/pull/3585): Additional test coverage for non-existent fields -- [#3246](https://github.com/influxdb/influxdb/issues/3246): Allow overriding of configuration parameters using environment variables -- [#3599](https://github.com/influxdb/influxdb/pull/3599): **--BREAKING CHANGE--** Support multiple UDP inputs. Thanks @tpitale -- [#3636](https://github.com/influxdb/influxdb/pull/3639): Cap auto-created retention policy replica count at 3 -- [#3641](https://github.com/influxdb/influxdb/pull/3641): Logging enhancements and single-node rename -- [#3635](https://github.com/influxdb/influxdb/pull/3635): Add build branch to version output. -- [#3115](https://github.com/influxdb/influxdb/pull/3115): Various init.d script improvements. Thanks @KoeSystems. -- [#3628](https://github.com/influxdb/influxdb/pull/3628): Wildcard expansion of tags and fields for raw queries -- [#3721](https://github.com/influxdb/influxdb/pull/3721): interpret number literals compared against time as nanoseconds from epoch -- [#3514](https://github.com/influxdb/influxdb/issues/3514): Implement WAL outside BoltDB with compaction -- [#3544](https://github.com/influxdb/influxdb/pull/3544): Implement compression on top of BoltDB -- [#3795](https://github.com/influxdb/influxdb/pull/3795): Throttle import -- [#3584](https://github.com/influxdb/influxdb/pull/3584): Import/export documenation - -### Bugfixes -- [#3405](https://github.com/influxdb/influxdb/pull/3405): Prevent database panic when fields are missing. Thanks @jhorwit2 -- [#3411](https://github.com/influxdb/influxdb/issues/3411): 500 timeout on write -- [#3420](https://github.com/influxdb/influxdb/pull/3420): Catch opentsdb malformed tags. Thanks @nathanielc. -- [#3404](https://github.com/influxdb/influxdb/pull/3404): Added support for escaped single quotes in query string. Thanks @jhorwit2 -- [#3414](https://github.com/influxdb/influxdb/issues/3414): Shard mappers perform query re-writing -- [#3525](https://github.com/influxdb/influxdb/pull/3525): check if fields are valid during parse time. 
-- [#3511](https://github.com/influxdb/influxdb/issues/3511): Sending a large number of tag causes panic -- [#3288](https://github.com/influxdb/influxdb/issues/3288): Run go fuzz on the line-protocol input -- [#3545](https://github.com/influxdb/influxdb/issues/3545): Fix parsing string fields with newlines -- [#3579](https://github.com/influxdb/influxdb/issues/3579): Revert breaking change to `client.NewClient` function -- [#3580](https://github.com/influxdb/influxdb/issues/3580): Do not allow wildcards with fields in select statements -- [#3530](https://github.com/influxdb/influxdb/pull/3530): Aliasing a column no longer works -- [#3436](https://github.com/influxdb/influxdb/issues/3436): Fix panic in hinted handoff queue processor -- [#3401](https://github.com/influxdb/influxdb/issues/3401): Derivative on non-numeric fields panics db -- [#3583](https://github.com/influxdb/influxdb/issues/3583): Inserting value in scientific notation with a trailing i causes panic -- [#3611](https://github.com/influxdb/influxdb/pull/3611): Fix query arithmetic with integers -- [#3326](https://github.com/influxdb/influxdb/issues/3326): simple regex query fails with cryptic error -- [#3618](https://github.com/influxdb/influxdb/pull/3618): Fix collectd stats panic on i386. Thanks @richterger -- [#3625](https://github.com/influxdb/influxdb/pull/3625): Don't panic when aggregate and raw queries are in a single statement -- [#3629](https://github.com/influxdb/influxdb/pull/3629): Use sensible batching defaults for Graphite. -- [#3638](https://github.com/influxdb/influxdb/pull/3638): Cluster config fixes and removal of meta.peers config field -- [#3640](https://github.com/influxdb/influxdb/pull/3640): Shutdown Graphite service when signal received. -- [#3632](https://github.com/influxdb/influxdb/issues/3632): Make single-node host renames more seamless -- [#3656](https://github.com/influxdb/influxdb/issues/3656): Silence snapshotter logger for testing -- [#3651](https://github.com/influxdb/influxdb/pull/3651): Fully remove series when dropped. -- [#3517](https://github.com/influxdb/influxdb/pull/3517): Batch CQ writes to avoid timeouts. Thanks @dim. -- [#3522](https://github.com/influxdb/influxdb/pull/3522): Consume CQ results on request timeouts. Thanks @dim. -- [#3646](https://github.com/influxdb/influxdb/pull/3646): Fix nil FieldCodec panic. -- [#3672](https://github.com/influxdb/influxdb/pull/3672): Reduce in-memory index by 20%-30% -- [#3673](https://github.com/influxdb/influxdb/pull/3673): Improve query performance by removing unnecessary tagset sorting. -- [#3676](https://github.com/influxdb/influxdb/pull/3676): Improve query performance by memomizing mapper output keys. -- [#3686](https://github.com/influxdb/influxdb/pull/3686): Ensure 'p' parameter is not logged, even on OPTIONS requests. -- [#3687](https://github.com/influxdb/influxdb/issues/3687): Fix panic: runtime error: makeslice: len out of range in hinted handoff -- [#3697](https://github.com/influxdb/influxdb/issues/3697): Correctly merge non-chunked results for same series. Fix issue #3242. 
-- [#3708](https://github.com/influxdb/influxdb/issues/3708): Fix double escaping measurement name during cluster replication -- [#3704](https://github.com/influxdb/influxdb/issues/3704): cluster replication issue for measurement name containing backslash -- [#3681](https://github.com/influxdb/influxdb/issues/3681): Quoted measurement names fail -- [#3681](https://github.com/influxdb/influxdb/issues/3682): Fix inserting string value with backslashes -- [#3735](https://github.com/influxdb/influxdb/issues/3735): Append to small bz1 blocks -- [#3736](https://github.com/influxdb/influxdb/pull/3736): Update shard group duration with retention policy changes. Thanks for the report @papylhomme -- [#3539](https://github.com/influxdb/influxdb/issues/3539): parser incorrectly accepts NaN as numerical value, but not always -- [#3790](https://github.com/influxdb/influxdb/pull/3790): Fix line protocol parsing equals in measurements and NaN values -- [#3778](https://github.com/influxdb/influxdb/pull/3778): Don't panic if SELECT on time. -- [#3824](https://github.com/influxdb/influxdb/issues/3824): tsdb.Point.MarshalBinary needs to support all number types -- [#3828](https://github.com/influxdb/influxdb/pull/3828): Support all number types when decoding a point -- [#3853](https://github.com/influxdb/influxdb/pull/3853): Use 4KB default block size for bz1 -- [#3607](https://github.com/influxdb/influxdb/issues/3607): Fix unable to query influxdb due to deadlock in metastore. Thanks @ccutrer! - -## v0.9.2 [2015-07-24] - -### Features -- [#3177](https://github.com/influxdb/influxdb/pull/3177): Client supports making HTTPS requests. Thanks @jipperinbham -- [#3299](https://github.com/influxdb/influxdb/pull/3299): Refactor query engine for distributed query support. -- [#3334](https://github.com/influxdb/influxdb/pull/3334): Clean shutdown of influxd. Thanks @mcastilho - -### Bugfixes - -- [#3180](https://github.com/influxdb/influxdb/pull/3180): Log GOMAXPROCS, version, and commit on startup. -- [#3218](https://github.com/influxdb/influxdb/pull/3218): Allow write timeouts to be configurable. -- [#3184](https://github.com/influxdb/influxdb/pull/3184): Support basic auth in admin interface. Thanks @jipperinbham! -- [#3236](https://github.com/influxdb/influxdb/pull/3236): Fix display issues in admin interface. -- [#3232](https://github.com/influxdb/influxdb/pull/3232): Set logging prefix for metastore. -- [#3230](https://github.com/influxdb/influxdb/issues/3230): panic: unable to parse bool value -- [#3245](https://github.com/influxdb/influxdb/issues/3245): Error using graphite plugin with multiple filters -- [#3223](https://github.com/influxdb/influxdb/issues/323): default graphite template cannot have extra tags -- [#3255](https://github.com/influxdb/influxdb/pull/3255): Flush WAL on start-up as soon as possible. -- [#3289](https://github.com/influxdb/influxdb/issues/3289): InfluxDB crashes on floats without decimal -- [#3298](https://github.com/influxdb/influxdb/pull/3298): Corrected WAL & flush parameters in default config. Thanks @jhorwit2 -- [#3152](https://github.com/influxdb/influxdb/issues/3159): High CPU Usage with unsorted writes -- [#3307](https://github.com/influxdb/influxdb/pull/3307): Fix regression parsing boolean values True/False -- [#3304](https://github.com/influxdb/influxdb/pull/3304): Fixed httpd logger to log user from query params. Thanks @jhorwit2 -- [#3332](https://github.com/influxdb/influxdb/pull/3332): Add SLIMIT and SOFFSET to string version of AST. 
-- [#3335](https://github.com/influxdb/influxdb/pull/3335): Don't drop all data on DROP DATABASE. Thanks to @PierreF for the report -- [#2761](https://github.com/influxdb/influxdb/issues/2761): Make SHOW RETENTION POLICIES consistent with other queries. -- [#3356](https://github.com/influxdb/influxdb/pull/3356): Disregard semicolons after database name in use command. Thanks @timraymond. -- [#3351](https://github.com/influxdb/influxdb/pull/3351): Handle malformed regex comparisons during parsing. Thanks @rnubel -- [#3244](https://github.com/influxdb/influxdb/pull/3244): Wire up admin privilege grant and revoke. -- [#3259](https://github.com/influxdb/influxdb/issues/3259): Respect privileges for queries. -- [#3256](https://github.com/influxdb/influxdb/pull/3256): Remove unnecessary timeout in WaitForLeader(). Thanks @cannium. -- [#3380](https://github.com/influxdb/influxdb/issue/3380): Parser fix, only allow ORDER BY ASC and ORDER BY time ASC. -- [#3319](https://github.com/influxdb/influxdb/issues/3319): restarting process irrevocably BREAKS measurements with spaces -- [#3453](https://github.com/influxdb/influxdb/issues/3453): Remove outdated `dump` command from CLI. -- [#3463](https://github.com/influxdb/influxdb/issues/3463): Fix aggregate queries and time precision on where clauses. - -## v0.9.1 [2015-07-02] - -### Features - -- [2650](https://github.com/influxdb/influxdb/pull/2650): Add SHOW GRANTS FOR USER statement. Thanks @n1tr0g. -- [3125](https://github.com/influxdb/influxdb/pull/3125): Graphite Input Protocol Parsing -- [2746](https://github.com/influxdb/influxdb/pull/2746): New Admin UI/interface -- [3036](https://github.com/influxdb/influxdb/pull/3036): Write Ahead Log (WAL) -- [3014](https://github.com/influxdb/influxdb/issues/3014): Implement Raft snapshots - -### Bugfixes - -- [3013](https://github.com/influxdb/influxdb/issues/3013): Panic error with inserting values with commas -- [#2956](https://github.com/influxdb/influxdb/issues/2956): Type mismatch in derivative -- [#2908](https://github.com/influxdb/influxdb/issues/2908): Field mismatch error messages need to be updated -- [#2931](https://github.com/influxdb/influxdb/pull/2931): Services and reporting should wait until cluster has leader. -- [#2943](https://github.com/influxdb/influxdb/issues/2943): Ensure default retention policies are fully replicated -- [#2948](https://github.com/influxdb/influxdb/issues/2948): Field mismatch error message to include measurement name -- [#2919](https://github.com/influxdb/influxdb/issues/2919): Unable to insert negative floats -- [#2935](https://github.com/influxdb/influxdb/issues/2935): Hook CPU and memory profiling back up. -- [#2960](https://github.com/influxdb/influxdb/issues/2960): Cluster Write Errors. -- [#2928](https://github.com/influxdb/influxdb/pull/2928): Start work to set InfluxDB version in HTTP response headers. Thanks @neonstalwart. -- [#2969](https://github.com/influxdb/influxdb/pull/2969): Actually set HTTP version in responses. -- [#2993](https://github.com/influxdb/influxdb/pull/2993): Don't log each UDP batch. -- [#2994](https://github.com/influxdb/influxdb/pull/2994): Don't panic during wilcard expansion if no default database specified. -- [#3002](https://github.com/influxdb/influxdb/pull/3002): Remove measurement from shard's index on DROP MEASUREMENT. -- [#3021](https://github.com/influxdb/influxdb/pull/3021): Correct set HTTP write trace logging. Thanks @vladlopes. 
-- [#3027](https://github.com/influxdb/influxdb/pull/3027): Enforce minimum retention policy duration of 1 hour. -- [#3030](https://github.com/influxdb/influxdb/pull/3030): Fix excessive logging of shard creation. -- [#3038](https://github.com/influxdb/influxdb/pull/3038): Don't check deleted shards for precreation. Thanks @vladlopes. -- [#3033](https://github.com/influxdb/influxdb/pull/3033): Add support for marshaling `uint64` in client. -- [#3090](https://github.com/influxdb/influxdb/pull/3090): Remove database from TSDB index on DROP DATABASE. -- [#2944](https://github.com/influxdb/influxdb/issues/2944): Don't require "WHERE time" when creating continuous queries. -- [#3075](https://github.com/influxdb/influxdb/pull/3075): GROUP BY correctly when different tags have same value. -- [#3078](https://github.com/influxdb/influxdb/pull/3078): Fix CLI panic on malformed INSERT. -- [#2102](https://github.com/influxdb/influxdb/issues/2102): Re-work Graphite input and metric processing -- [#2996](https://github.com/influxdb/influxdb/issues/2996): Graphite Input Parsing -- [#3136](https://github.com/influxdb/influxdb/pull/3136): Fix various issues with init.d script. Thanks @ miguelcnf. -- [#2996](https://github.com/influxdb/influxdb/issues/2996): Graphite Input Parsing -- [#3127](https://github.com/influxdb/influxdb/issues/3127): Trying to insert a number larger than the largest signed 64-bit number kills influxd -- [#3131](https://github.com/influxdb/influxdb/pull/3131): Copy batch tags to each point before marshalling -- [#3155](https://github.com/influxdb/influxdb/pull/3155): Instantiate UDP batcher before listening for UDP traffic, otherwise a panic may result. -- [#2678](https://github.com/influxdb/influxdb/issues/2678): Server allows tags with an empty string for the key and/or value -- [#3061](https://github.com/influxdb/influxdb/issues/3061): syntactically incorrect line protocol insert panics the database -- [#2608](https://github.com/influxdb/influxdb/issues/2608): drop measurement while writing points to that measurement has race condition that can panic -- [#3183](https://github.com/influxdb/influxdb/issues/3183): using line protocol measurement names cannot contain commas -- [#3193](https://github.com/influxdb/influxdb/pull/3193): Fix panic for SHOW STATS and in collectd -- [#3102](https://github.com/influxdb/influxdb/issues/3102): Add authentication cache -- [#3209](https://github.com/influxdb/influxdb/pull/3209): Dump Run() errors to stderr -- [#3217](https://github.com/influxdb/influxdb/pull/3217): Allow WAL partition flush delay to be configurable. 
- -## v0.9.0 [2015-06-11] - -### Bugfixes - -- [#2869](https://github.com/influxdb/influxdb/issues/2869): Adding field to existing measurement causes panic -- [#2849](https://github.com/influxdb/influxdb/issues/2849): RC32: Frequent write errors -- [#2700](https://github.com/influxdb/influxdb/issues/2700): Incorrect error message in database EncodeFields -- [#2897](https://github.com/influxdb/influxdb/pull/2897): Ensure target Graphite database exists -- [#2898](https://github.com/influxdb/influxdb/pull/2898): Ensure target openTSDB database exists -- [#2895](https://github.com/influxdb/influxdb/pull/2895): Use Graphite input defaults where necessary -- [#2900](https://github.com/influxdb/influxdb/pull/2900): Use openTSDB input defaults where necessary -- [#2886](https://github.com/influxdb/influxdb/issues/2886): Refactor backup & restore -- [#2804](https://github.com/influxdb/influxdb/pull/2804): BREAKING: change time literals to be single quoted in InfluxQL. Thanks @nvcook42! -- [#2906](https://github.com/influxdb/influxdb/pull/2906): Restrict replication factor to the cluster size -- [#2905](https://github.com/influxdb/influxdb/pull/2905): Restrict clusters to 3 peers -- [#2904](https://github.com/influxdb/influxdb/pull/2904): Re-enable server reporting. -- [#2917](https://github.com/influxdb/influxdb/pull/2917): Fix int64 field values. -- [#2920](https://github.com/influxdb/influxdb/issues/2920): Ensure collectd database exists - -## v0.9.0-rc33 [2015-06-09] - -### Bugfixes - -- [#2816](https://github.com/influxdb/influxdb/pull/2816): Enable UDP service. Thanks @renan- -- [#2824](https://github.com/influxdb/influxdb/pull/2824): Add missing call to WaitGroup.Done in execConn. Thanks @liyichao -- [#2823](https://github.com/influxdb/influxdb/pull/2823): Convert OpenTSDB to a service. -- [#2838](https://github.com/influxdb/influxdb/pull/2838): Set auto-created retention policy period to infinite. -- [#2829](https://github.com/influxdb/influxdb/pull/2829): Re-enable Graphite support as a new Service-style component. -- [#2814](https://github.com/influxdb/influxdb/issues/2814): Convert collectd to a service. -- [#2852](https://github.com/influxdb/influxdb/pull/2852): Don't panic when altering retention policies. Thanks for the report @huhongbo -- [#2857](https://github.com/influxdb/influxdb/issues/2857): Fix parsing commas in string field values. -- [#2833](https://github.com/influxdb/influxdb/pull/2833): Make the default config valid. -- [#2859](https://github.com/influxdb/influxdb/pull/2859): Fix panic on aggregate functions. -- [#2878](https://github.com/influxdb/influxdb/pull/2878): Re-enable shard precreation. -- [2865](https://github.com/influxdb/influxdb/pull/2865) -- Return an empty set of results if database does not exist in shard metadata. - -### Features -- [2858](https://github.com/influxdb/influxdb/pull/2858): Support setting openTSDB write consistency. - -## v0.9.0-rc32 [2015-06-07] - -### Release Notes - -This released introduced an updated write path and clustering design. The data format has also changed, so you'll need to wipe out your data to upgrade from RC31. There should be no other data changes before v0.9.0 is released. - -### Features -- [#1997](https://github.com/influxdb/influxdb/pull/1997): Update SELECT * to return tag values. -- [#2599](https://github.com/influxdb/influxdb/issues/2599): Add "epoch" URL param and return JSON time values as epoch instead of date strings. 
-- [#2682](https://github.com/influxdb/influxdb/issues/2682): Adding pr checklist to CONTRIBUTING.md -- [#2683](https://github.com/influxdb/influxdb/issues/2683): Add batching support to Graphite inputs. -- [#2687](https://github.com/influxdb/influxdb/issues/2687): Add batching support to Collectd inputs. -- [#2696](https://github.com/influxdb/influxdb/pull/2696): Add line protocol. This is now the preferred way to write data. -- [#2751](https://github.com/influxdb/influxdb/pull/2751): Add UDP input. UDP only supports the line protocol now. -- [#2684](https://github.com/influxdb/influxdb/pull/2684): Include client timeout configuration. Thanks @vladlopes! - -### Bugfixes -- [#2776](https://github.com/influxdb/influxdb/issues/2776): Re-implement retention policy enforcement. -- [#2635](https://github.com/influxdb/influxdb/issues/2635): Fix querying against boolean field in WHERE clause. -- [#2644](https://github.com/influxdb/influxdb/issues/2644): Make SHOW queries work with FROM //. -- [#2501](https://github.com/influxdb/influxdb/issues/2501): Name the FlagSet for the shell and add a version flag. Thanks @neonstalwart -- [#2647](https://github.com/influxdb/influxdb/issues/2647): Fixes typos in sample config file - thanks @claws! - -## v0.9.0-rc31 [2015-05-21] - -### Features -- [#1822](https://github.com/influxdb/influxdb/issues/1822): Wire up DERIVATIVE aggregate -- [#1477](https://github.com/influxdb/influxdb/issues/1477): Wire up non_negative_derivative function -- [#2557](https://github.com/influxdb/influxdb/issues/2557): Fix false positive error with `GROUP BY time` -- [#1891](https://github.com/influxdb/influxdb/issues/1891): Wire up COUNT DISTINCT aggregate -- [#1989](https://github.com/influxdb/influxdb/issues/1989): Implement `SELECT tagName FROM m` - -### Bugfixes -- [#2545](https://github.com/influxdb/influxdb/pull/2545): Use "value" as the field name for graphite input. Thanks @cannium. -- [#2558](https://github.com/influxdb/influxdb/pull/2558): Fix client response check - thanks @vladlopes! -- [#2566](https://github.com/influxdb/influxdb/pull/2566): Wait until each data write has been commited by the Raft cluster. -- [#2602](https://github.com/influxdb/influxdb/pull/2602): CLI execute command exits without cleaning up liner package. -- [#2610](https://github.com/influxdb/influxdb/pull/2610): Fix shard group creation -- [#2596](https://github.com/influxdb/influxdb/pull/2596): RC30: `panic: runtime error: index out of range` when insert data points. -- [#2592](https://github.com/influxdb/influxdb/pull/2592): Should return an error if user attempts to group by a field. -- [#2499](https://github.com/influxdb/influxdb/pull/2499): Issuing a select query with tag as a values causes panic. -- [#2612](https://github.com/influxdb/influxdb/pull/2612): Query planner should validate distinct is passed a field. -- [#2531](https://github.com/influxdb/influxdb/issues/2531): Fix select with 3 or more terms in where clause. -- [#2564](https://github.com/influxdb/influxdb/issues/2564): Change "name" to "measurement" in JSON for writes. - -## PRs -- [#2569](https://github.com/influxdb/influxdb/pull/2569): Add derivative functions -- [#2598](https://github.com/influxdb/influxdb/pull/2598): Implement tag support in SELECT statements -- [#2624](https://github.com/influxdb/influxdb/pull/2624): Remove references to SeriesID in `DROP SERIES` handlers. 
- -## v0.9.0-rc30 [2015-05-12] - -### Release Notes - -This release has a breaking API change for writes -- the field previously called `timestamp` has been renamed to `time`. - -### Features -- [#2254](https://github.com/influxdb/influxdb/pull/2254): Add Support for OpenTSDB HTTP interface. Thanks @tcolgate -- [#2525](https://github.com/influxdb/influxdb/pull/2525): Serve broker diagnostics over HTTP -- [#2186](https://github.com/influxdb/influxdb/pull/2186): The default status code for queries is now `200 OK` -- [#2298](https://github.com/influxdb/influxdb/pull/2298): Successful writes now return a status code of `204 No Content` - thanks @neonstalwart! -- [#2549](https://github.com/influxdb/influxdb/pull/2549): Raft election timeout to 5 seconds, so system is more forgiving of CPU loads. -- [#2568](https://github.com/influxdb/influxdb/pull/2568): Wire up SELECT DISTINCT. - -### Bugfixes -- [#2535](https://github.com/influxdb/influxdb/pull/2535): Return exit status 0 if influxd already running. Thanks @haim0n. -- [#2521](https://github.com/influxdb/influxdb/pull/2521): Don't truncate topic data until fully replicated. -- [#2509](https://github.com/influxdb/influxdb/pull/2509): Parse config file correctly during restore. Thanks @neonstalwart -- [#2536](https://github.com/influxdb/influxdb/issues/2532): Set leader ID on restart of single-node cluster. -- [#2448](https://github.com/influxdb/influxdb/pull/2448): Fix inconsistent data type - thanks @cannium! -- [#2108](https://github.com/influxdb/influxdb/issues/2108): Change `timestamp` to `time` - thanks @neonstalwart! -- [#2539](https://github.com/influxdb/influxdb/issues/2539): Add additional vote request logging. -- [#2541](https://github.com/influxdb/influxdb/issues/2541): Update messaging client connection index with every message. -- [#2542](https://github.com/influxdb/influxdb/issues/2542): Throw parser error for invalid aggregate without where time. -- [#2548](https://github.com/influxdb/influxdb/issues/2548): Return an error when numeric aggregate applied to non-numeric data. -- [#2487](https://github.com/influxdb/influxdb/issues/2487): Aggregate query with exact timestamp causes panic. Thanks @neonstalwart! -- [#2552](https://github.com/influxdb/influxdb/issues/2552): Run CQ that is actually passed into go-routine. -- [#2553](https://github.com/influxdb/influxdb/issues/2553): Fix race condition during CQ execution. -- [#2557](https://github.com/influxdb/influxdb/issues/2557): RC30 WHERE time filter Regression. - -## v0.9.0-rc29 [2015-05-05] - -### Features -- [#2410](https://github.com/influxdb/influxdb/pull/2410): If needed, brokers respond with data nodes for peer shard replication. -- [#2469](https://github.com/influxdb/influxdb/pull/2469): Reduce default max topic size from 1GB to 50MB. -- [#1824](https://github.com/influxdb/influxdb/pull/1824): Wire up MEDIAN aggregate. Thanks @neonstalwart! - -### Bugfixes -- [#2446](https://github.com/influxdb/influxdb/pull/2446): Correctly count number of queries executed. Thanks @neonstalwart -- [#2452](https://github.com/influxdb/influxdb/issues/2452): Fix panic with shard stats on multiple clusters -- [#2453](https://github.com/influxdb/influxdb/pull/2453): Do not require snapshot on Log.WriteEntriesTo(). -- [#2460](https://github.com/influxdb/influxdb/issues/2460): Collectd input should use "value" for fields values. Fixes 2412. Thanks @josh-padnick -- [#2465](https://github.com/influxdb/influxdb/pull/2465): HTTP response logging paniced with chunked requests. 
Thanks @Jackkoz -- [#2475](https://github.com/influxdb/influxdb/pull/2475): RLock server when checking if shards groups are required during write. -- [#2471](https://github.com/influxdb/influxdb/issues/2471): Function calls normalized to be lower case. Fixes percentile not working when called uppercase. Thanks @neonstalwart -- [#2281](https://github.com/influxdb/influxdb/issues/2281): Fix Bad Escape error when parsing regex - -## v0.9.0-rc28 [2015-04-27] - -### Features -- [#2410](https://github.com/influxdb/influxdb/pull/2410) Allow configuration of Raft timers -- [#2354](https://github.com/influxdb/influxdb/pull/2354) Wire up STDDEV. Thanks @neonstalwart! - -### Bugfixes -- [#2374](https://github.com/influxdb/influxdb/issues/2374): Two different panics during SELECT percentile -- [#2404](https://github.com/influxdb/influxdb/pull/2404): Mean and percentile function fixes -- [#2408](https://github.com/influxdb/influxdb/pull/2408): Fix snapshot 500 error -- [#1896](https://github.com/influxdb/influxdb/issues/1896): Excessive heartbeater logging of "connection refused" on cluster node stop -- [#2418](https://github.com/influxdb/influxdb/pull/2418): Fix raft node getting stuck in candidate state -- [#2415](https://github.com/influxdb/influxdb/pull/2415): Raft leader ID now set on election after failover. Thanks @xiaost -- [#2426](https://github.com/influxdb/influxdb/pull/2426): Fix race condition around listener address in openTSDB server. -- [#2426](https://github.com/influxdb/influxdb/pull/2426): Fix race condition around listener address in Graphite server. -- [#2429](https://github.com/influxdb/influxdb/pull/2429): Ensure no field value is null. -- [#2431](https://github.com/influxdb/influxdb/pull/2431): Always append shard path in diags. Thanks @marcosnils -- [#2441](https://github.com/influxdb/influxdb/pull/2441): Correctly release server RLock during "drop series". -- [#2445](https://github.com/influxdb/influxdb/pull/2445): Read locks and data race fixes - -## v0.9.0-rc27 [04-23-2015] - -### Features -- [#2398](https://github.com/influxdb/influxdb/pull/2398) Track more stats and report errors for shards. - -### Bugfixes -- [#2370](https://github.com/influxdb/influxdb/pull/2370): Fix data race in openTSDB endpoint. -- [#2371](https://github.com/influxdb/influxdb/pull/2371): Don't set client to nil when closing broker Fixes #2352 -- [#2372](https://github.com/influxdb/influxdb/pull/2372): Fix data race in graphite endpoint. -- [#2373](https://github.com/influxdb/influxdb/pull/2373): Actually allow HTTP logging to be controlled. -- [#2376](https://github.com/influxdb/influxdb/pull/2376): Encode all types of integers. Thanks @jtakkala. -- [#2376](https://github.com/influxdb/influxdb/pull/2376): Add shard path to existing diags value. Fix issue #2369. -- [#2386](https://github.com/influxdb/influxdb/pull/2386): Fix shard datanodes stats getting appended too many times -- [#2393](https://github.com/influxdb/influxdb/pull/2393): Fix default hostname for connecting to cluster. -- [#2390](https://github.com/influxdb/influxdb/pull/2390): Handle large sums when calculating means - thanks @neonstalwart! 
-- [#2391](https://github.com/influxdb/influxdb/pull/2391): Unable to write points through Go client when authentication enabled -- [#2400](https://github.com/influxdb/influxdb/pull/2400): Always send auth headers for client requests if present - -## v0.9.0-rc26 [04-21-2015] - -### Features -- [#2301](https://github.com/influxdb/influxdb/pull/2301): Distributed query load balancing and failover -- [#2336](https://github.com/influxdb/influxdb/pull/2336): Handle distributed queries when shards != data nodes -- [#2353](https://github.com/influxdb/influxdb/pull/2353): Distributed Query/Clustering Fixes - -### Bugfixes -- [#2297](https://github.com/influxdb/influxdb/pull/2297): create /var/run during startup. Thanks @neonstalwart. -- [#2312](https://github.com/influxdb/influxdb/pull/2312): Re-use httpclient for continuous queries -- [#2318](https://github.com/influxdb/influxdb/pull/2318): Remove pointless use of 'done' channel for collectd. -- [#2242](https://github.com/influxdb/influxdb/pull/2242): Distributed Query should balance requests -- [#2243](https://github.com/influxdb/influxdb/pull/2243): Use Limit Reader instead of fixed 1MB/1GB slice for DQ -- [#2190](https://github.com/influxdb/influxdb/pull/2190): Implement failover to other data nodes for distributed queries -- [#2324](https://github.com/influxdb/influxdb/issues/2324): Race in Broker.Close()/Broker.RunContinousQueryProcessing() -- [#2325](https://github.com/influxdb/influxdb/pull/2325): Cluster open fixes -- [#2326](https://github.com/influxdb/influxdb/pull/2326): Fix parse error in CREATE CONTINUOUS QUERY -- [#2300](https://github.com/influxdb/influxdb/pull/2300): Refactor integration tests. Properly close Graphite/OpenTSDB listeners. -- [#2338](https://github.com/influxdb/influxdb/pull/2338): Fix panic if tag key isn't double quoted when it should have been -- [#2340](https://github.com/influxdb/influxdb/pull/2340): Fix SHOW DIAGNOSTICS panic if any shard was non-local. -- [#2351](https://github.com/influxdb/influxdb/pull/2351): Fix data race by rlocking shard during diagnostics. -- [#2348](https://github.com/influxdb/influxdb/pull/2348): Data node fail to join cluster in 0.9.0rc25 -- [#2343](https://github.com/influxdb/influxdb/pull/2343): Node falls behind Metastore updates -- [#2334](https://github.com/influxdb/influxdb/pull/2334): Test Partial replication is very problematic -- [#2272](https://github.com/influxdb/influxdb/pull/2272): clustering: influxdb 0.9.0-rc23 panics when doing a GET with merge_metrics in a -- [#2350](https://github.com/influxdb/influxdb/pull/2350): Issue fix for :influxd -hostname localhost. -- [#2367](https://github.com/influxdb/influxdb/pull/2367): PR for issue #2350 - Always use localhost, not host name. - -## v0.9.0-rc25 [2015-04-15] - -### Bugfixes -- [#2282](https://github.com/influxdb/influxdb/pull/2282): Use "value" as field name for OpenTSDB input. -- [#2283](https://github.com/influxdb/influxdb/pull/2283): Fix bug when restarting an entire existing cluster. -- [#2293](https://github.com/influxdb/influxdb/pull/2293): Open cluster listener before starting broker. -- [#2287](https://github.com/influxdb/influxdb/pull/2287): Fix data race during SHOW RETENTION POLICIES. -- [#2288](https://github.com/influxdb/influxdb/pull/2288): Fix expression parsing bug. -- [#2294](https://github.com/influxdb/influxdb/pull/2294): Fix async response flushing (invalid chunked response error). - -## Features -- [#2276](https://github.com/influxdb/influxdb/pull/2276): Broker topic truncation. 
-- [#2292](https://github.com/influxdb/influxdb/pull/2292): Wire up drop CQ statement - thanks @neonstalwart! -- [#2290](https://github.com/influxdb/influxdb/pull/2290): Allow hostname argument to override default config - thanks @neonstalwart! -- [#2295](https://github.com/influxdb/influxdb/pull/2295): Use nil as default return value for MapCount - thanks @neonstalwart! -- [#2246](https://github.com/influxdb/influxdb/pull/2246): Allow HTTP logging to be controlled. - -## v0.9.0-rc24 [2015-04-13] - -### Bugfixes -- [#2255](https://github.com/influxdb/influxdb/pull/2255): Fix panic when changing default retention policy. -- [#2257](https://github.com/influxdb/influxdb/pull/2257): Add "snapshotting" pseudo state & log entry cache. -- [#2261](https://github.com/influxdb/influxdb/pull/2261): Support int64 value types. -- [#2191](https://github.com/influxdb/influxdb/pull/2191): Case-insensitive check for "fill" -- [#2274](https://github.com/influxdb/influxdb/pull/2274): Snapshot and HTTP API endpoints -- [#2265](https://github.com/influxdb/influxdb/pull/2265): Fix auth for CLI. - -## v0.9.0-rc23 [2015-04-11] - -### Features -- [#2202](https://github.com/influxdb/influxdb/pull/2202): Initial implementation of Distributed Queries -- [#2202](https://github.com/influxdb/influxdb/pull/2202): 64-bit Series IDs. INCOMPATIBLE WITH PREVIOUS DATASTORES. - -### Bugfixes -- [#2225](https://github.com/influxdb/influxdb/pull/2225): Make keywords completely case insensitive -- [#2228](https://github.com/influxdb/influxdb/pull/2228): Accept keyword default unquoted in ALTER RETENTION POLICY statement -- [#2236](https://github.com/influxdb/influxdb/pull/2236): Immediate term changes, fix stale write issue, net/http/pprof -- [#2213](https://github.com/influxdb/influxdb/pull/2213): Seed random number generator for election timeout. Thanks @cannium. - -## v0.9.0-rc22 [2015-04-09] - -### Features -- [#2214](https://github.com/influxdb/influxdb/pull/2214): Added the option to influx CLI to execute single command and exit. Thanks @n1tr0g - -### Bugfixes -- [#2223](https://github.com/influxdb/influxdb/pull/2223): Always notify term change on RequestVote - -## v0.9.0-rc21 [2015-04-09] - -### Features -- [#870](https://github.com/influxdb/influxdb/pull/870): Add support for OpenTSDB telnet input protocol. Thanks @tcolgate -- [#2180](https://github.com/influxdb/influxdb/pull/2180): Allow http write handler to decode gzipped body -- [#2175](https://github.com/influxdb/influxdb/pull/2175): Separate broker and data nodes -- [#2158](https://github.com/influxdb/influxdb/pull/2158): Allow user password to be changed. Thanks @n1tr0g -- [#2201](https://github.com/influxdb/influxdb/pull/2201): Bring back config join URLs -- [#2121](https://github.com/influxdb/influxdb/pull/2121): Parser refactor - -### Bugfixes -- [#2181](https://github.com/influxdb/influxdb/pull/2181): Fix panic on "SHOW DIAGNOSTICS". -- [#2170](https://github.com/influxdb/influxdb/pull/2170): Make sure queries on missing tags return 200 status. -- [#2197](https://github.com/influxdb/influxdb/pull/2197): Lock server during Open(). -- [#2200](https://github.com/influxdb/influxdb/pull/2200): Re-enable Continuous Queries. -- [#2203](https://github.com/influxdb/influxdb/pull/2203): Fix race condition on continuous queries. -- [#2217](https://github.com/influxdb/influxdb/pull/2217): Only revert to follower if new term is greater. -- [#2219](https://github.com/influxdb/influxdb/pull/2219): Persist term change to disk when candidate. 
## v0.9.0-rc20 [2015-04-04]

### Features
- [#2128](https://github.com/influxdb/influxdb/pull/2128): Data node discovery from brokers
- [#2142](https://github.com/influxdb/influxdb/pull/2142): Support chunked queries
- [#2154](https://github.com/influxdb/influxdb/pull/2154): Node redirection
- [#2168](https://github.com/influxdb/influxdb/pull/2168): Return raft term from vote, add term logging

### Bugfixes
- [#2147](https://github.com/influxdb/influxdb/pull/2147): Set Go Max procs in a better location
- [#2137](https://github.com/influxdb/influxdb/pull/2137): Refactor `results` to `response`. Breaking Go Client change.
- [#2151](https://github.com/influxdb/influxdb/pull/2151): Ignore replay commands on the metastore.
- [#2152](https://github.com/influxdb/influxdb/issues/2152): Influxd process with stats enabled crashing with 'Unsuported protocol scheme for ""'
- [#2156](https://github.com/influxdb/influxdb/pull/2156): Propagate error when resolving UDP address in Graphite UDP server.
- [#2163](https://github.com/influxdb/influxdb/pull/2163): Fix up paths for default data and run storage.
- [#2164](https://github.com/influxdb/influxdb/pull/2164): Append STDOUT/STDERR in initscript.
- [#2165](https://github.com/influxdb/influxdb/pull/2165): Better name for config section for stats and diags.
- [#2165](https://github.com/influxdb/influxdb/pull/2165): Monitoring database and retention policy are not configurable.
- [#2167](https://github.com/influxdb/influxdb/pull/2167): Add broker log recovery.
- [#2166](https://github.com/influxdb/influxdb/pull/2166): Don't panic if presented with a field of unknown type.
- [#2149](https://github.com/influxdb/influxdb/pull/2149): Fix unit tests for win32 when directory doesn't exist.
- [#2150](https://github.com/influxdb/influxdb/pull/2150): Fix unit tests for win32 when a connection is refused.

## v0.9.0-rc19 [2015-04-01]

### Features
- [#2143](https://github.com/influxdb/influxdb/pull/2143): Add raft term logging.

### Bugfixes
- [#2145](https://github.com/influxdb/influxdb/pull/2145): Encode TOML durations correctly, which fixes default configuration generation via `influxd config`.

## v0.9.0-rc18 [2015-03-31]

### Bugfixes
- [#2100](https://github.com/influxdb/influxdb/pull/2100): Use channel to synchronize collectd shutdown.
- [#2100](https://github.com/influxdb/influxdb/pull/2100): Synchronize access to shard index.
- [#2131](https://github.com/influxdb/influxdb/pull/2131): Optimize marshalTags().
- [#2130](https://github.com/influxdb/influxdb/pull/2130): Make fewer calls to marshalTags().
- [#2105](https://github.com/influxdb/influxdb/pull/2105): Support != for tag values. Fix issue #2097, thanks to @smonkewitz for bug report.
- [#2105](https://github.com/influxdb/influxdb/pull/2105): Support !~ for tag values.
- [#2138](https://github.com/influxdb/influxdb/pull/2136): Use map for marshaledTags cache.

## v0.9.0-rc17 [2015-03-29]

### Features
- [#2076](https://github.com/influxdb/influxdb/pull/2076): Separate stdout and stderr output in init.d script
- [#2091](https://github.com/influxdb/influxdb/pull/2091): Support disabling snapshot endpoint.
- [#2081](https://github.com/influxdb/influxdb/pull/2081): Support writing diagnostic data into the internal database.
- [#2095](https://github.com/influxdb/influxdb/pull/2095): Improved InfluxDB client docs. Thanks @derailed

### Bugfixes
- [#2093](https://github.com/influxdb/influxdb/pull/2093): Point precision not marshalled correctly. Thanks @derailed
- [#2084](https://github.com/influxdb/influxdb/pull/2084): Allow leading underscores in identifiers.
- [#2080](https://github.com/influxdb/influxdb/pull/2080): Graphite logs in seconds, not milliseconds.
- [#2101](https://github.com/influxdb/influxdb/pull/2101): SHOW DATABASES should name returned series "databases".
- [#2104](https://github.com/influxdb/influxdb/pull/2104): Include NEQ when calculating field filters.
- [#2112](https://github.com/influxdb/influxdb/pull/2112): Set GOMAXPROCS on startup. This may have been causing extra leader elections, which would cause a number of other bugs or instability.
- [#2111](https://github.com/influxdb/influxdb/pull/2111) and [#2025](https://github.com/influxdb/influxdb/issues/2025): Raft stability fixes. Non-contiguous log error and others.
- [#2114](https://github.com/influxdb/influxdb/pull/2114): Correctly start influxd on platforms without start-stop-daemon.

## v0.9.0-rc16 [2015-03-24]

### Features
- [#2058](https://github.com/influxdb/influxdb/pull/2058): Track number of queries executed in stats.
- [#2059](https://github.com/influxdb/influxdb/pull/2059): Retention policies sorted by name on return to client.
- [#2061](https://github.com/influxdb/influxdb/pull/2061): Implement SHOW DIAGNOSTICS.
- [#2064](https://github.com/influxdb/influxdb/pull/2064): Allow init.d script to return influxd version.
- [#2053](https://github.com/influxdb/influxdb/pull/2053): Implement backup and restore.
- [#1631](https://github.com/influxdb/influxdb/pull/1631): Wire up DROP CONTINUOUS QUERY.

### Bugfixes
- [#2037](https://github.com/influxdb/influxdb/pull/2037): Don't check 'configExists' at Run() level.
- [#2039](https://github.com/influxdb/influxdb/pull/2039): Don't panic if getting current user fails.
- [#2034](https://github.com/influxdb/influxdb/pull/2034): GROUP BY should require an aggregate (see the sketch below).
- [#2040](https://github.com/influxdb/influxdb/pull/2040): Add missing top-level help for config command.
- [#2057](https://github.com/influxdb/influxdb/pull/2057): Move racy "in order" test to integration test suite.
- [#2060](https://github.com/influxdb/influxdb/pull/2060): Reload server shard map on restart.
- [#2068](https://github.com/influxdb/influxdb/pull/2068): Fix misspelled JSON field.
- [#2067](https://github.com/influxdb/influxdb/pull/2067): Fixed issue where some queries didn't properly pull back data (introduced in RC15). Fixing intervals for GROUP BY.

## v0.9.0-rc15 [2015-03-19]

### Features
- [#2000](https://github.com/influxdb/influxdb/pull/2000): Log broker path when broker fails to start. Thanks @gst.
- [#2007](https://github.com/influxdb/influxdb/pull/2007): Track shard-level stats.

### Bugfixes
- [#2001](https://github.com/influxdb/influxdb/pull/2001): Ensure measurement not found returns status code 200.
- [#1985](https://github.com/influxdb/influxdb/pull/1985): Set content-type JSON header before actually writing header. Thanks @dstrek.
- [#2003](https://github.com/influxdb/influxdb/pull/2003): Set timestamp when writing monitoring stats.
- [#2004](https://github.com/influxdb/influxdb/pull/2004): Limit group by to MaxGroupByPoints (currently 100,000).
- [#2016](https://github.com/influxdb/influxdb/pull/2016): Fixing bucket alignment for group by. Thanks @jnutzmann
- [#2021](https://github.com/influxdb/influxdb/pull/2021): Remove unnecessary formatting from log message. Thanks @simonkern
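Taken together, the rc16/rc15 entries above shape how `GROUP BY` queries are issued: the clause must now be paired with an aggregate ([#2034](https://github.com/influxdb/influxdb/pull/2034)), and result sizes are capped by MaxGroupByPoints ([#2004](https://github.com/influxdb/influxdb/pull/2004)). A minimal sketch of such a query against the 0.9-era `/query` endpoint, assuming a local server on port 8086 and a hypothetical database named `mydb`:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

func main() {
	// Per #2034, GROUP BY must be paired with an aggregate; mean() here.
	v := url.Values{}
	v.Set("db", "mydb") // hypothetical database name
	v.Set("q", "SELECT mean(value) FROM cpu WHERE time > now() - 1h GROUP BY time(5m)")

	resp, err := http.Get("http://localhost:8086/query?" + v.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The response is a JSON document of results/series (note rc20's
	// `results` -> `response` rename above); print it raw for brevity.
	out, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```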
## v0.9.0-rc14 [2015-03-18]

### Bugfixes
- [#1999](https://github.com/influxdb/influxdb/pull/1999): Return status code 200 for measurement not found errors on show series.

## v0.9.0-rc13 [2015-03-17]

### Features
- [#1974](https://github.com/influxdb/influxdb/pull/1974): Add time taken for request to the http server logs.

### Bugfixes
- [#1971](https://github.com/influxdb/influxdb/pull/1971): Fix leader id initialization.
- [#1975](https://github.com/influxdb/influxdb/pull/1975): Require `q` parameter for query endpoint.
- [#1969](https://github.com/influxdb/influxdb/pull/1969): Print loaded config.
- [#1987](https://github.com/influxdb/influxdb/pull/1987): Fix config print startup statement for when no config is provided.
- [#1990](https://github.com/influxdb/influxdb/pull/1990): Drop measurement was taking too long due to transactions.

## v0.9.0-rc12 [2015-03-15]

### Bugfixes
- [#1942](https://github.com/influxdb/influxdb/pull/1942): Sort wildcard names.
- [#1957](https://github.com/influxdb/influxdb/pull/1957): Graphite numbers are always float64.
- [#1955](https://github.com/influxdb/influxdb/pull/1955): Prohibit creation of databases with no name. Thanks @dullgiulio
- [#1952](https://github.com/influxdb/influxdb/pull/1952): Handle delete statement with an error. Thanks again to @dullgiulio

### Features
- [#1935](https://github.com/influxdb/influxdb/pull/1935): Implement stateless broker for Raft.
- [#1936](https://github.com/influxdb/influxdb/pull/1936): Implement "SHOW STATS" and self-monitoring
- [#1909](https://github.com/influxdb/influxdb/pull/1909): Implement a dump command.

## v0.9.0-rc11 [2015-03-13]

### Bugfixes
- [#1917](https://github.com/influxdb/influxdb/pull/1902): Creating Infinite Retention Policy Failed.
- [#1758](https://github.com/influxdb/influxdb/pull/1758): Add Graphite Integration Test.
- [#1929](https://github.com/influxdb/influxdb/pull/1929): Default Retention Policy incorrectly auto created.
- [#1930](https://github.com/influxdb/influxdb/pull/1930): Auto create database for graphite if not specified.
- [#1908](https://github.com/influxdb/influxdb/pull/1908): Cosmetic CLI output fixes.
- [#1931](https://github.com/influxdb/influxdb/pull/1931): Add default column to SHOW RETENTION POLICIES.
- [#1937](https://github.com/influxdb/influxdb/pull/1937): OFFSET should be allowed to be 0.

### Features
- [#1902](https://github.com/influxdb/influxdb/pull/1902): Enforce retention policies to have a minimum duration.
- [#1906](https://github.com/influxdb/influxdb/pull/1906): Add show servers to query language.
- [#1925](https://github.com/influxdb/influxdb/pull/1925): Add `fill(none)`, `fill(previous)`, and `fill()` to queries.

## v0.9.0-rc10 [2015-03-09]

### Bugfixes
- [#1867](https://github.com/influxdb/influxdb/pull/1867): Fix race accessing topic replicas map
- [#1864](https://github.com/influxdb/influxdb/pull/1864): Fix race in startStateLoop
- [#1753](https://github.com/influxdb/influxdb/pull/1874): Do Not Panic on Missing Dirs
- [#1877](https://github.com/influxdb/influxdb/pull/1877): Broker clients track broker leader
- [#1862](https://github.com/influxdb/influxdb/pull/1862): Fix memory leak in `httpd.serveWait`. Thanks @mountkin
- [#1883](https://github.com/influxdb/influxdb/pull/1883): RLock server during retention policy enforcement. Thanks @grisha
- [#1868](https://github.com/influxdb/influxdb/pull/1868): Use `BatchPoints` for `client.Write` method. Thanks @vladlopes, @georgmu, @d2g, @evanphx, @akolosov.
- [#1881](https://github.com/influxdb/influxdb/pull/1881): Update documentation for `client` package. Misc library tweaks.
- Fix queries with multiple where clauses on tags, times and fields. Fix queries that have where clauses on fields not in the select.

### Features
- [#1875](https://github.com/influxdb/influxdb/pull/1875): Support trace logging of Raft.
- [#1895](https://github.com/influxdb/influxdb/pull/1895): Auto-create a retention policy when a database is created.
- [#1897](https://github.com/influxdb/influxdb/pull/1897): Pre-create shard groups.
- [#1900](https://github.com/influxdb/influxdb/pull/1900): Change `LIMIT` to `SLIMIT` and implement `LIMIT` and `OFFSET`

## v0.9.0-rc9 [2015-03-06]

### Bugfixes
- [#1872](https://github.com/influxdb/influxdb/pull/1872): Fix "stale term" errors with raft

## v0.9.0-rc8 [2015-03-05]

### Bugfixes
- [#1836](https://github.com/influxdb/influxdb/pull/1836): Store each parsed shell command in history file.
- [#1789](https://github.com/influxdb/influxdb/pull/1789): Add --config-files option to fpm command. Thanks @kylezh
- [#1859](https://github.com/influxdb/influxdb/pull/1859): Queries with a `GROUP BY *` clause were returning a 500 if run against a measurement that didn't exist

### Features
- [#1755](https://github.com/influxdb/influxdb/pull/1848): Support JSON data ingest over UDP
- [#1857](https://github.com/influxdb/influxdb/pull/1857): Support retention policies with infinite duration
- [#1858](https://github.com/influxdb/influxdb/pull/1858): Enable detailed tracing of write path

## v0.9.0-rc7 [2015-03-02]

### Features
- [#1813](https://github.com/influxdb/influxdb/pull/1813): Queries for missing measurements or fields now return a 200 with an error message in the series JSON.
- [#1826](https://github.com/influxdb/influxdb/pull/1826), [#1827](https://github.com/influxdb/influxdb/pull/1827): Fixed queries with `WHERE` clauses against fields.

### Bugfixes

- [#1744](https://github.com/influxdb/influxdb/pull/1744): Allow retention policies to be modified without specifying replication factor. Thanks @kylezh
- [#1809](https://github.com/influxdb/influxdb/pull/1809): Packaging post-install script unconditionally removes init.d symlink. Thanks @sineos

## v0.9.0-rc6 [2015-02-27]

### Bugfixes

- [#1780](https://github.com/influxdb/influxdb/pull/1780): Malformed identifiers get through the parser
- [#1775](https://github.com/influxdb/influxdb/pull/1775): Panic "index out of range" on some queries
- [#1744](https://github.com/influxdb/influxdb/pull/1744): Select shard groups which completely encompass time range. Thanks @kylezh.

## v0.9.0-rc5 [2015-02-27]

### Bugfixes

- [#1752](https://github.com/influxdb/influxdb/pull/1752): Remove debug log output from collectd.
- [#1720](https://github.com/influxdb/influxdb/pull/1720): Parse Series IDs as unsigned 32-bits.
- [#1767](https://github.com/influxdb/influxdb/pull/1767): Drop Series was failing across shards. Issue #1761.
- [#1773](https://github.com/influxdb/influxdb/pull/1773): Fix bug when merging series together that have an unequal number of points in a group by interval
- [#1771](https://github.com/influxdb/influxdb/pull/1771): Make `SHOW SERIES` return IDs and support `LIMIT` and `OFFSET`

### Features

- [#1698](https://github.com/influxdb/influxdb/pull/1698): Wire up DROP MEASUREMENT

## v0.9.0-rc4 [2015-02-24]

### Bugfixes

- Fix authentication issue with continuous queries
- Print version in the log on startup

## v0.9.0-rc3 [2015-02-23]

### Features

- [#1659](https://github.com/influxdb/influxdb/pull/1659): WHERE against regexes: `WHERE =~ '.*asdf'`
- [#1580](https://github.com/influxdb/influxdb/pull/1580): Add support for fields with bool, int, or string data types
- [#1687](https://github.com/influxdb/influxdb/pull/1687): Change `Rows` to `Series` in results output. BREAKING API CHANGE
- [#1629](https://github.com/influxdb/influxdb/pull/1629): Add support for `DROP SERIES` queries
- [#1632](https://github.com/influxdb/influxdb/pull/1632): Add support for `GROUP BY *` to return all series within a measurement (see the sketch below)
- [#1689](https://github.com/influxdb/influxdb/pull/1689): Change `SHOW TAG VALUES WITH KEY="foo"` to use the key name in the result. BREAKING API CHANGE
- [#1699](https://github.com/influxdb/influxdb/pull/1699): Add CPU and memory profiling options to daemon
- [#1672](https://github.com/influxdb/influxdb/pull/1672): Add index tracking to metastore. Makes downed node recovery actually work
- [#1591](https://github.com/influxdb/influxdb/pull/1591): Add `spread` aggregate function
- [#1576](https://github.com/influxdb/influxdb/pull/1576): Add `first` and `last` aggregate functions
- [#1573](https://github.com/influxdb/influxdb/pull/1573): Add `stddev` aggregate function
- [#1565](https://github.com/influxdb/influxdb/pull/1565): Add the admin interface back into the server and update for new API
- [#1562](https://github.com/influxdb/influxdb/pull/1562): Enforce retention policies
- [#1700](https://github.com/influxdb/influxdb/pull/1700): Change `Values` to `Fields` on writes. BREAKING API CHANGE
- [#1706](https://github.com/influxdb/influxdb/pull/1706): Add support for `LIMIT` and `OFFSET`, which work on the number of series returned in a query. To limit the number of data points use a `WHERE time` clause

### Bugfixes

- [#1636](https://github.com/influxdb/influxdb/issues/1636): Don't store number of fields in raw data. THIS IS A BREAKING DATA CHANGE. YOU MUST START WITH A FRESH DATABASE
- [#1701](https://github.com/influxdb/influxdb/pull/1701), [#1667](https://github.com/influxdb/influxdb/pull/1667), [#1663](https://github.com/influxdb/influxdb/pull/1663), [#1615](https://github.com/influxdb/influxdb/pull/1615): Raft fixes
- [#1644](https://github.com/influxdb/influxdb/pull/1644): Add batching support for significantly improved write performance
- [#1704](https://github.com/influxdb/influxdb/pull/1704): Fix queries that pull back raw data (i.e. ones without aggregate functions)
- [#1718](https://github.com/influxdb/influxdb/pull/1718): Return an error on write if any of the points don't have at least one field
- [#1806](https://github.com/influxdb/influxdb/pull/1806): Fix regex parsing. Change regex syntax to use / delimiters.
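Several of the rc3 features above combine in ordinary queries: `GROUP BY *` ([#1632](https://github.com/influxdb/influxdb/pull/1632)) fans results out per series, `LIMIT` and `OFFSET` ([#1706](https://github.com/influxdb/influxdb/pull/1706)) page through series rather than points, and regexes use the `/` delimiters from [#1806](https://github.com/influxdb/influxdb/pull/1806). A hedged sketch against a local 0.9-era server; the port, endpoint parameters, and database name `mydb` are assumptions:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
)

// query is a tiny helper for the 0.9-era /query endpoint.
func query(db, cmd string) (string, error) {
	v := url.Values{}
	v.Set("db", db)
	v.Set("q", cmd)
	resp, err := http.Get("http://localhost:8086/query?" + v.Encode())
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	out, err := ioutil.ReadAll(resp.Body)
	return string(out), err
}

func main() {
	// GROUP BY * returns every series in the measurement separately; at this
	// point in history LIMIT pages through *series*, not data points, and the
	// regex uses the / delimiters introduced by #1806.
	out, err := query("mydb", "SELECT mean(value) FROM cpu WHERE host =~ /server.*/ GROUP BY * LIMIT 5")
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```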
- - -## v0.9.0-rc1,2 [no public release] - -### Features - -- Support for tags added -- New queries for showing measurement names, tag keys, and tag values -- Renamed shard spaces to retention policies -- Deprecated matching against regex in favor of explicit writing and querying on retention policies -- Pure Go InfluxQL parser -- Switch to BoltDB as underlying datastore -- BoltDB backed metastore to store schema information -- Updated HTTP API to only have two endpoints `/query` and `/write` -- Added all administrative functions to the query language -- Change cluster architecture to have brokers and data nodes -- Switch to streaming Raft implementation -- In memory inverted index of the tag data -- Pure Go implementation! - -## v0.8.6 [2014-11-15] - -### Features - -- [Issue #973](https://github.com/influxdb/influxdb/issues/973). Support - joining using a regex or list of time series -- [Issue #1068](https://github.com/influxdb/influxdb/issues/1068). Print - the processor chain when the query is started - -### Bugfixes - -- [Issue #584](https://github.com/influxdb/influxdb/issues/584). Don't - panic if the process died while initializing -- [Issue #663](https://github.com/influxdb/influxdb/issues/663). Make - sure all sub servies are closed when are stopping InfluxDB -- [Issue #671](https://github.com/influxdb/influxdb/issues/671). Fix - the Makefile package target for Mac OSX -- [Issue #800](https://github.com/influxdb/influxdb/issues/800). Use - su instead of sudo in the init script. This fixes the startup problem - on RHEL 6. -- [Issue #925](https://github.com/influxdb/influxdb/issues/925). Don't - generate invalid query strings for single point queries -- [Issue #943](https://github.com/influxdb/influxdb/issues/943). Don't - take two snapshots at the same time -- [Issue #947](https://github.com/influxdb/influxdb/issues/947). Exit - nicely if the daemon doesn't have permission to write to the log. -- [Issue #959](https://github.com/influxdb/influxdb/issues/959). Stop using - closed connections in the protobuf client. -- [Issue #978](https://github.com/influxdb/influxdb/issues/978). Check - for valgrind and mercurial in the configure script -- [Issue #996](https://github.com/influxdb/influxdb/issues/996). Fill should - fill the time range even if no points exists in the given time range -- [Issue #1008](https://github.com/influxdb/influxdb/issues/1008). Return - an appropriate exit status code depending on whether the process exits - due to an error or exits gracefully. -- [Issue #1024](https://github.com/influxdb/influxdb/issues/1024). Hitting - open files limit causes influxdb to create shards in loop. -- [Issue #1069](https://github.com/influxdb/influxdb/issues/1069). Fix - deprecated interface endpoint in Admin UI. -- [Issue #1076](https://github.com/influxdb/influxdb/issues/1076). Fix - the timestamps of data points written by the collectd plugin. (Thanks, - @renchap for reporting this bug) -- [Issue #1078](https://github.com/influxdb/influxdb/issues/1078). Make sure - we don't resurrect shard directories for shards that have already expired -- [Issue #1085](https://github.com/influxdb/influxdb/issues/1085). Set - the connection string of the local raft node -- [Issue #1092](https://github.com/influxdb/influxdb/issues/1093). Set - the connection string of the local node in the raft snapshot. -- [Issue #1100](https://github.com/influxdb/influxdb/issues/1100). Removing - a non-existent shard space causes the cluster to panic. 
- [Issue #1113](https://github.com/influxdb/influxdb/issues/1113). A nil engine.ProcessorChain causes a panic.

## v0.8.5 [2014-10-27]

### Features

- [Issue #1055](https://github.com/influxdb/influxdb/issues/1055). Allow graphite and collectd input plugins to have separate binding address

### Bugfixes

- [Issue #1058](https://github.com/influxdb/influxdb/issues/1058). Use the query language instead of the continuous query endpoints that were removed in 0.8.4
- [Issue #1022](https://github.com/influxdb/influxdb/issues/1022). Return +Inf or NaN instead of panicking when we encounter a divide by zero
- [Issue #821](https://github.com/influxdb/influxdb/issues/821). Don't scan through points when we hit the limit
- [Issue #1051](https://github.com/influxdb/influxdb/issues/1051). Fix timestamps when collectd is used and low-resolution timestamps are set.

## v0.8.4 [2014-10-24]

### Bugfixes

- Remove the continuous query api endpoints since the query language has all the features needed to list and delete continuous queries.
- [Issue #778](https://github.com/influxdb/influxdb/issues/778). Selecting from a non-existent series should give a better error message indicating that the series doesn't exist
- [Issue #988](https://github.com/influxdb/influxdb/issues/988). Check the arguments of `top()` and `bottom()`
- [Issue #1021](https://github.com/influxdb/influxdb/issues/1021). Make redirecting to standard output and standard error optional instead of going to `/dev/null`. This can now be configured by setting `$STDOUT` in `/etc/default/influxdb`
- [Issue #985](https://github.com/influxdb/influxdb/issues/985). Make sure we drop a shard only when there's no one using it. Otherwise, the shard can be closed when another goroutine is writing to it which will cause random errors and possibly corruption of the database.

### Features

- [Issue #1047](https://github.com/influxdb/influxdb/issues/1047). Allow merge() to take a list of series (as opposed to a regex in #72)

## v0.8.4-rc.1 [2014-10-21]

### Bugfixes

- [Issue #1040](https://github.com/influxdb/influxdb/issues/1040). Revert to older raft snapshot if the latest one is corrupted
- [Issue #1004](https://github.com/influxdb/influxdb/issues/1004). Querying for data outside of existing shards returns an empty response instead of throwing a `Couldn't lookup columns` error
- [Issue #1020](https://github.com/influxdb/influxdb/issues/1020). Change init script exit codes to conform to the lsb standards. (Thanks, @spuder)
- [Issue #1011](https://github.com/influxdb/influxdb/issues/1011). Fix the tarball for homebrew so that rocksdb is included and the directory structure is clean
- [Issue #1007](https://github.com/influxdb/influxdb/issues/1007). Fix the content type when an error occurs and the client requests compression.
- [Issue #916](https://github.com/influxdb/influxdb/issues/916). Set the ulimit in the init script with a way to override the limit
- [Issue #742](https://github.com/influxdb/influxdb/issues/742). Fix rocksdb for Mac OSX
- [Issue #387](https://github.com/influxdb/influxdb/issues/387). Aggregations with group by time(1w), time(1m) and time(1y) (for week, month and year respectively) will cause the start time and end time of the bucket to fall on the logical boundaries of the week, month or year.
- [Issue #334](https://github.com/influxdb/influxdb/issues/334). Derivative for queries with group by time() and fill(), will take the difference between the first value in the bucket and the first value of the next bucket.
- [Issue #972](https://github.com/influxdb/influxdb/issues/972). Don't assign duplicate server ids

### Features

- [Issue #722](https://github.com/influxdb/influxdb/issues/722). Add an install target to the Makefile
- [Issue #1032](https://github.com/influxdb/influxdb/issues/1032). Include the admin ui static assets in the binary
- [Issue #1019](https://github.com/influxdb/influxdb/issues/1019). Upgrade to rocksdb 3.5.1
- [Issue #992](https://github.com/influxdb/influxdb/issues/992). Add an input plugin for collectd. (Thanks, @kimor79)
- [Issue #72](https://github.com/influxdb/influxdb/issues/72). Support merge for multiple series using regex syntax

## v0.8.3 [2014-09-24]

### Bugfixes

- [Issue #885](https://github.com/influxdb/influxdb/issues/885). Multiple queries separated by semicolons work as expected. Queries are processed sequentially
- [Issue #652](https://github.com/influxdb/influxdb/issues/652). Return an error if an invalid column is used in the where clause
- [Issue #794](https://github.com/influxdb/influxdb/issues/794). Fix case insensitive regex matching
- [Issue #853](https://github.com/influxdb/influxdb/issues/853). Move cluster config from raft to API.
- [Issue #714](https://github.com/influxdb/influxdb/issues/714). Don't panic on invalid boolean operators.
- [Issue #843](https://github.com/influxdb/influxdb/issues/843). Prevent blank database names
- [Issue #780](https://github.com/influxdb/influxdb/issues/780). Fix fill() for all aggregators
- [Issue #923](https://github.com/influxdb/influxdb/issues/923). Enclose table names in double quotes in the result of GetQueryString()
- [Issue #967](https://github.com/influxdb/influxdb/issues/967). Return an error if the storage engine can't be created
- [Issue #954](https://github.com/influxdb/influxdb/issues/954). Don't automatically create shards, which was causing too many shards to be created when used with grafana
- [Issue #939](https://github.com/influxdb/influxdb/issues/939). Aggregation should ignore null values and invalid values, e.g. strings with mean().
- [Issue #964](https://github.com/influxdb/influxdb/issues/964). Parse big int in queries properly.

## v0.8.2 [2014-09-05]

### Bugfixes

- [Issue #886](https://github.com/influxdb/influxdb/issues/886). Update shard space to not set defaults
- [Issue #867](https://github.com/influxdb/influxdb/issues/867). Add option to return shard space mappings in list series
- [Issue #652](https://github.com/influxdb/influxdb/issues/652). Return a meaningful error if an invalid column is used in where clause after joining multiple series

## v0.8.2 [2014-09-08]

### Features

- Added API endpoint to update shard space definitions

### Bugfixes

- [Issue #886](https://github.com/influxdb/influxdb/issues/886). Shard space regexes reset after restart of InfluxDB

## v0.8.1 [2014-09-03]

### Features

- [Issue #896](https://github.com/influxdb/influxdb/issues/896). Allow logging to syslog. Thanks @malthe

### Bugfixes

- [Issue #868](https://github.com/influxdb/influxdb/issues/868). Don't panic when upgrading a snapshot from 0.7.x
- [Issue #887](https://github.com/influxdb/influxdb/issues/887). The first continuous query shouldn't trigger backfill if it had backfill disabled
- [Issue #674](https://github.com/influxdb/influxdb/issues/674). Graceful exit when config file is invalid. (Thanks, @DavidBord)
- [Issue #857](https://github.com/influxdb/influxdb/issues/857). More informative list servers api. (Thanks, @oliveagle)

## v0.8.0 [2014-08-22]

### Features

- [Issue #850](https://github.com/influxdb/influxdb/issues/850). Makes the server listing more informative

### Bugfixes

- [Issue #779](https://github.com/influxdb/influxdb/issues/779). Deleting expired shards isn't thread safe.
- [Issue #860](https://github.com/influxdb/influxdb/issues/860). Load database config should validate shard spaces.
- [Issue #862](https://github.com/influxdb/influxdb/issues/862). Data migrator should have option to set delay time.

## v0.8.0-rc.5 [2014-08-15]

### Features

- [Issue #376](https://github.com/influxdb/influxdb/issues/376). List series should support regex filtering
- [Issue #745](https://github.com/influxdb/influxdb/issues/745). Add continuous queries to the database config
- [Issue #746](https://github.com/influxdb/influxdb/issues/746). Add data migration tool for 0.8.0

### Bugfixes

- [Issue #426](https://github.com/influxdb/influxdb/issues/426). Fill should fill the entire time range that is requested
- [Issue #740](https://github.com/influxdb/influxdb/issues/740). Don't emit non-existent fields when joining series with different fields
- [Issue #744](https://github.com/influxdb/influxdb/issues/744). Admin site should have all assets locally
- [Issue #767](https://github.com/influxdb/influxdb/issues/768). Remove shards whenever they expire
- [Issue #781](https://github.com/influxdb/influxdb/issues/781). Don't emit non-existent fields when joining series with different fields
- [Issue #791](https://github.com/influxdb/influxdb/issues/791). Move database config loader to be an API endpoint
- [Issue #809](https://github.com/influxdb/influxdb/issues/809). Migration path from 0.7 -> 0.8
- [Issue #811](https://github.com/influxdb/influxdb/issues/811). Gogoprotobuf removed `ErrWrongType`, which is depended on by Raft
- [Issue #820](https://github.com/influxdb/influxdb/issues/820). Query non-local shard with time range to avoid getting back points not in time range
- [Issue #827](https://github.com/influxdb/influxdb/issues/827). Don't leak file descriptors in the WAL
- [Issue #830](https://github.com/influxdb/influxdb/issues/830). List series should return series in lexicographic sorted order
- [Issue #831](https://github.com/influxdb/influxdb/issues/831). Move create shard space to be db specific

## v0.8.0-rc.4 [2014-07-29]

### Bugfixes

- [Issue #774](https://github.com/influxdb/influxdb/issues/774). Don't try to parse "inf" shard retention policy
- [Issue #769](https://github.com/influxdb/influxdb/issues/769). Use retention duration when determining expired shards. (Thanks, @shugo)
- [Issue #736](https://github.com/influxdb/influxdb/issues/736). Only db admins should be able to drop a series
- [Issue #713](https://github.com/influxdb/influxdb/issues/713). Null should be a valid fill value
- [Issue #644](https://github.com/influxdb/influxdb/issues/644). Graphite api should write data in batches to the coordinator
- [Issue #740](https://github.com/influxdb/influxdb/issues/740). Panic when distinct fields are selected from an inner join
- [Issue #781](https://github.com/influxdb/influxdb/issues/781). Panic when distinct fields are added after an inner join

## v0.8.0-rc.3 [2014-07-21]

### Bugfixes

- [Issue #752](https://github.com/influxdb/influxdb/issues/752). `./configure` should use goroot to find gofmt
- [Issue #758](https://github.com/influxdb/influxdb/issues/758). Clarify the reason behind graphite input plugin not starting. (Thanks, @otoolep)
- [Issue #759](https://github.com/influxdb/influxdb/issues/759). Don't revert the regex in the shard space. (Thanks, @shugo)
- [Issue #760](https://github.com/influxdb/influxdb/issues/760). Removing a server should remove it from the shard server ids. (Thanks, @shugo)
- [Issue #772](https://github.com/influxdb/influxdb/issues/772). Add sentinel values to all db. This caused the last key in the db to not be fetched properly.

## v0.8.0-rc.2 [2014-07-15]

- This release is to fix a build error in rc1 which caused rocksdb to not be available
- Bump up the `max-open-files` option to 1000 on all storage engines
- Lower the `write-buffer-size` to 1000

## v0.8.0-rc.1 [2014-07-15]

### Features

- [Issue #643](https://github.com/influxdb/influxdb/issues/643). Support pretty print json. (Thanks, @otoolep)
- [Issue #641](https://github.com/influxdb/influxdb/issues/641). Support multiple storage engines
- [Issue #665](https://github.com/influxdb/influxdb/issues/665). Make build tmp directory configurable in the make file. (Thanks, @dgnorton)
- [Issue #667](https://github.com/influxdb/influxdb/issues/667). Enable compression on all GET requests and when writing data
- [Issue #648](https://github.com/influxdb/influxdb/issues/648). Return permissions when listing db users. (Thanks, @nicolai86)
- [Issue #682](https://github.com/influxdb/influxdb/issues/682). Allow continuous queries to run without backfill (Thanks, @dhammika)
- [Issue #689](https://github.com/influxdb/influxdb/issues/689). **REQUIRES DATA MIGRATION** Move metadata into raft
- [Issue #255](https://github.com/influxdb/influxdb/issues/255). Support millisecond precision using `ms` suffix
- [Issue #95](https://github.com/influxdb/influxdb/issues/95). Drop database should not be synchronous
- [Issue #571](https://github.com/influxdb/influxdb/issues/571). Add support for arbitrary number of shard spaces and retention policies
- Default storage engine changed to RocksDB

### Bugfixes

- [Issue #651](https://github.com/influxdb/influxdb/issues/651). Change permissions of symlink which fix some installation issues. (Thanks, @Dieterbe)
- [Issue #670](https://github.com/influxdb/influxdb/issues/670). Don't warn on missing influxdb user on fresh installs
- [Issue #676](https://github.com/influxdb/influxdb/issues/676). Allow storing high precision integer values without losing any information
- [Issue #695](https://github.com/influxdb/influxdb/issues/695). Prevent having duplicate field names in the write payload. (Thanks, @seunglee150)
- [Issue #731](https://github.com/influxdb/influxdb/issues/731). Don't enable the udp plugin if the `enabled` option is set to false
- [Issue #733](https://github.com/influxdb/influxdb/issues/733). Print an `INFO` message when the input plugin is disabled
- [Issue #707](https://github.com/influxdb/influxdb/issues/707). Graphite input plugin should work with payloads delimited by any whitespace character
- [Issue #734](https://github.com/influxdb/influxdb/issues/734). Don't buffer non-replicated writes
- [Issue #465](https://github.com/influxdb/influxdb/issues/465). Recreating a currently deleting db or series doesn't bring back the old data anymore
- [Issue #358](https://github.com/influxdb/influxdb/issues/358). **BREAKING** List series should return as a single series
- [Issue #499](https://github.com/influxdb/influxdb/issues/499). **BREAKING** Querying non-existent database or series will return an error
- [Issue #570](https://github.com/influxdb/influxdb/issues/570). InfluxDB crashes during delete/drop of database
- [Issue #592](https://github.com/influxdb/influxdb/issues/592). Drop series is inefficient

## v0.7.3 [2014-06-13]

### Bugfixes

- [Issue #637](https://github.com/influxdb/influxdb/issues/637). Truncate log files if the last request wasn't written properly
- [Issue #646](https://github.com/influxdb/influxdb/issues/646). CRITICAL: Duplicate shard ids for new shards if old shards are deleted.

## v0.7.2 [2014-05-30]

### Features

- [Issue #521](https://github.com/influxdb/influxdb/issues/521). MODE works on all datatypes (Thanks, @richthegeek)

### Bugfixes

- [Issue #418](https://github.com/influxdb/influxdb/pull/418). Requests or responses larger than MAX_REQUEST_SIZE break things.
- [Issue #606](https://github.com/influxdb/influxdb/issues/606). InfluxDB will fail to start with invalid permission if log.txt didn't exist
- [Issue #602](https://github.com/influxdb/influxdb/issues/602). Merge will fail to work across shards

## v0.7.1 [2014-05-29]

### Bugfixes

- [Issue #579](https://github.com/influxdb/influxdb/issues/579). Reject writes to nonexistent databases
- [Issue #597](https://github.com/influxdb/influxdb/issues/597). Force compaction after deleting data

### Features

- [Issue #476](https://github.com/influxdb/influxdb/issues/476). Support ARM architecture
- [Issue #578](https://github.com/influxdb/influxdb/issues/578). Support aliasing for expressions in parenthesis
- [Issue #544](https://github.com/influxdb/influxdb/pull/544). Support forcing node removal from a cluster
- [Issue #591](https://github.com/influxdb/influxdb/pull/591). Support multiple udp input plugins (Thanks, @tpitale)
- [Issue #600](https://github.com/influxdb/influxdb/pull/600). Report version, os, arch, and raftName once per day.

## v0.7.0 [2014-05-23]

### Bugfixes

- [Issue #557](https://github.com/influxdb/influxdb/issues/557). Group by time(1y) doesn't work while time(365d) works
- [Issue #547](https://github.com/influxdb/influxdb/issues/547). Add difference function (Thanks, @mboelstra)
- [Issue #550](https://github.com/influxdb/influxdb/issues/550). Fix tests on 32-bit ARM
- [Issue #524](https://github.com/influxdb/influxdb/issues/524). Arithmetic operators and where conditions don't play nice together
- [Issue #561](https://github.com/influxdb/influxdb/issues/561). Fix missing query in parsing errors
- [Issue #563](https://github.com/influxdb/influxdb/issues/563). Add sample config for graphite over udp
- [Issue #537](https://github.com/influxdb/influxdb/issues/537). Incorrect query syntax causes internal error
- [Issue #565](https://github.com/influxdb/influxdb/issues/565). Empty series names shouldn't cause a panic
- [Issue #575](https://github.com/influxdb/influxdb/issues/575). Single point select doesn't interpret timestamps correctly
- [Issue #576](https://github.com/influxdb/influxdb/issues/576). We shouldn't set timestamps and sequence numbers when listing cq
- [Issue #560](https://github.com/influxdb/influxdb/issues/560). Use /dev/urandom instead of /dev/random
- [Issue #502](https://github.com/influxdb/influxdb/issues/502). Fix a race condition in assigning id to db+series+field (Thanks @ohurvitz for reporting this bug and providing a script to repro)

### Features

- [Issue #567](https://github.com/influxdb/influxdb/issues/567). Allow selecting from multiple series names by separating them with commas (Thanks, @peekeri)

### Deprecated

- [Issue #460](https://github.com/influxdb/influxdb/issues/460). Don't start automatically after installing
- [Issue #529](https://github.com/influxdb/influxdb/issues/529). Don't run influxdb as root
- [Issue #443](https://github.com/influxdb/influxdb/issues/443). Use `name` instead of `username` when returning cluster admins

## v0.6.5 [2014-05-19]

### Features

- [Issue #551](https://github.com/influxdb/influxdb/issues/551). Add TOP and BOTTOM aggregate functions (Thanks, @chobie)

### Bugfixes

- [Issue #555](https://github.com/influxdb/influxdb/issues/555). Fix a regression introduced in the raft snapshot format

## v0.6.4 [2014-05-16]

### Features

- Make the write batch size configurable (also applies to deletes)
- Optimize writing to multiple series
- [Issue #546](https://github.com/influxdb/influxdb/issues/546). Add UDP support for Graphite API (Thanks, @peekeri)

### Bugfixes

- Fix a bug in shard logic that caused short term shards to be clobbered with long term shards
- [Issue #489](https://github.com/influxdb/influxdb/issues/489). Remove replication factor from CreateDatabase command

## v0.6.3 [2014-05-13]

### Features

- [Issue #505](https://github.com/influxdb/influxdb/issues/505). Return a version header with the http response (Thanks, @majst01)
- [Issue #520](https://github.com/influxdb/influxdb/issues/520). Print the version to the log file

### Bugfixes

- [Issue #516](https://github.com/influxdb/influxdb/issues/516). Close WAL log/index files when they aren't being used
- [Issue #532](https://github.com/influxdb/influxdb/issues/532). Don't log graphite connection EOF as an error
- [Issue #535](https://github.com/influxdb/influxdb/issues/535). WAL Replay hangs if response isn't received
- [Issue #538](https://github.com/influxdb/influxdb/issues/538). Don't panic if the same series existed twice in the request with different columns
- [Issue #536](https://github.com/influxdb/influxdb/issues/536). Joining the cluster after shards are created shouldn't cause new nodes to panic
- [Issue #539](https://github.com/influxdb/influxdb/issues/539). count(distinct()) with fill shouldn't panic on empty groups
- [Issue #534](https://github.com/influxdb/influxdb/issues/534). Create a new series when interpolating

## v0.6.2 [2014-05-09]

### Bugfixes

- [Issue #511](https://github.com/influxdb/influxdb/issues/511). Don't automatically create the database when a db user is created
- [Issue #512](https://github.com/influxdb/influxdb/issues/512). Group by should respect null values
- [Issue #518](https://github.com/influxdb/influxdb/issues/518). Filter Infinities and NaNs from the returned json
- [Issue #522](https://github.com/influxdb/influxdb/issues/522). Committing requests while replaying caused the WAL to skip some log files
- [Issue #369](https://github.com/influxdb/influxdb/issues/369). Fix some edge cases with WAL recovery
## v0.6.1 [2014-05-06]

### Bugfixes

- [Issue #500](https://github.com/influxdb/influxdb/issues/500). Support `y` suffix in time durations
- [Issue #501](https://github.com/influxdb/influxdb/issues/501). Writes with invalid payload should be rejected
- [Issue #507](https://github.com/influxdb/influxdb/issues/507). New cluster admin passwords don't propagate properly to other nodes in a cluster
- [Issue #508](https://github.com/influxdb/influxdb/issues/508). Don't replay WAL entries for servers with no shards
- [Issue #464](https://github.com/influxdb/influxdb/issues/464). Admin UI shouldn't draw graphs for string columns
- [Issue #480](https://github.com/influxdb/influxdb/issues/480). Large values on the y-axis get cut off

## v0.6.0 [2014-05-02]

### Feature

- [Issue #477](https://github.com/influxdb/influxdb/issues/477). Add a udp json interface (Thanks, Julien Ammous)
- [Issue #491](https://github.com/influxdb/influxdb/issues/491). Make initial root password settable through env variable (Thanks, Edward Muller)

### Bugfixes

- [Issue #469](https://github.com/influxdb/influxdb/issues/469). Drop continuous queries when a database is dropped
- [Issue #431](https://github.com/influxdb/influxdb/issues/431). Don't log to standard output if a log file is specified in the config file
- [Issue #483](https://github.com/influxdb/influxdb/issues/483). Return 409 if a database already exists (Thanks, Edward Muller)
- [Issue #486](https://github.com/influxdb/influxdb/issues/486). Columns used in the target of continuous query shouldn't be inserted in the time series
- [Issue #490](https://github.com/influxdb/influxdb/issues/490). Database user passwords cannot be changed (Thanks, Edward Muller)
- [Issue #495](https://github.com/influxdb/influxdb/issues/495). Enforce write permissions properly

## v0.5.12 [2014-04-29]

### Bugfixes

- [Issue #419](https://github.com/influxdb/influxdb/issues/419), [Issue #478](https://github.com/influxdb/influxdb/issues/478). Allow hostname, raft and protobuf ports to be changed without requiring manual intervention from the user

## v0.5.11 [2014-04-25]

### Features

- [Issue #471](https://github.com/influxdb/influxdb/issues/471). Read and write permissions should be settable through the http api

### Bugfixes

- [Issue #323](https://github.com/influxdb/influxdb/issues/323). Continuous queries should guard against data loops
- [Issue #473](https://github.com/influxdb/influxdb/issues/473). Engine memory optimization

## v0.5.10 [2014-04-22]

### Features

- [Issue #463](https://github.com/influxdb/influxdb/issues/463). Allow series names to use any character (escape by wrapping in double quotes)
- [Issue #447](https://github.com/influxdb/influxdb/issues/447). Allow @ in usernames
- [Issue #466](https://github.com/influxdb/influxdb/issues/466). Allow column names to use any character (escape by wrapping in double quotes)

### Bugfixes

- [Issue #458](https://github.com/influxdb/influxdb/issues/458). Continuous queries with group by time() and a column should insert sequence numbers of 1
- [Issue #457](https://github.com/influxdb/influxdb/issues/457). Deleting series that start with capital letters should work

## v0.5.9 [2014-04-18]

### Bugfixes

- [Issue #446](https://github.com/influxdb/influxdb/issues/446). Check for (de)serialization errors
- [Issue #456](https://github.com/influxdb/influxdb/issues/456). Continuous queries failed if one of the group by columns had null value
- [Issue #455](https://github.com/influxdb/influxdb/issues/455). Comparison operators should ignore null values

## v0.5.8 [2014-04-17]

- Renamed config.toml.sample to config.sample.toml

### Bugfixes

- [Issue #244](https://github.com/influxdb/influxdb/issues/244). Reconstruct the query from the ast
- [Issue #449](https://github.com/influxdb/influxdb/issues/449). Heartbeat timeouts can cause reading from connection to lock up
- [Issue #451](https://github.com/influxdb/influxdb/issues/451). Reduce the aggregation state that is kept in memory so that aggregation queries over large periods of time don't take an insane amount of memory

## v0.5.7 [2014-04-15]

### Features

- Queries are now logged as INFO in the log file before they run

### Bugfixes

- [Issue #328](https://github.com/influxdb/influxdb/issues/328). Join queries with math expressions don't work
- [Issue #440](https://github.com/influxdb/influxdb/issues/440). Heartbeat timeouts in logs
- [Issue #442](https://github.com/influxdb/influxdb/issues/442). shouldQuerySequentially didn't work as expected, causing count(*) queries on large time series to use lots of memory
- [Issue #437](https://github.com/influxdb/influxdb/issues/437). Queries with negative constants don't parse properly
- [Issue #432](https://github.com/influxdb/influxdb/issues/432). Deleted data using a delete query is resurrected after a server restart
- [Issue #439](https://github.com/influxdb/influxdb/issues/439). Report the right location of the error in the query
- Fix some bugs with the WAL recovery on startup

## v0.5.6 [2014-04-08]

### Features

- [Issue #310](https://github.com/influxdb/influxdb/issues/310). Request should support multiple timeseries
- [Issue #416](https://github.com/influxdb/influxdb/issues/416). Improve the time it takes to drop database

### Bugfixes

- [Issue #413](https://github.com/influxdb/influxdb/issues/413). Don't assume that group by interval is greater than a second
- [Issue #415](https://github.com/influxdb/influxdb/issues/415). Include the database when sending an auth error back to the user
- [Issue #421](https://github.com/influxdb/influxdb/issues/421). Make read timeout a config option
- [Issue #392](https://github.com/influxdb/influxdb/issues/392). Different columns in different shards return invalid results when a query spans those shards

## v0.5.5 [2014-04-04]

- Upgrade leveldb 1.10 -> 1.15. This should be a backward compatible change, but is here for documentation only

### Features

- Add a command line option to repair corrupted leveldb databases on startup
- [Issue #401](https://github.com/influxdb/influxdb/issues/401). No limit on the number of columns in the group by clause

### Bugfixes

- [Issue #398](https://github.com/influxdb/influxdb/issues/398). Support now() and NOW() in the query lang
- [Issue #403](https://github.com/influxdb/influxdb/issues/403). Filtering should work with join queries
- [Issue #404](https://github.com/influxdb/influxdb/issues/404). Filtering with invalid condition shouldn't crash the server
- [Issue #405](https://github.com/influxdb/influxdb/issues/405). Percentile shouldn't crash for small number of values
- [Issue #408](https://github.com/influxdb/influxdb/issues/408). Make InfluxDB recover from internal bugs and panics
- [Issue #390](https://github.com/influxdb/influxdb/issues/390). Multiple response.WriteHeader when querying as admin
- [Issue #407](https://github.com/influxdb/influxdb/issues/407). Start processing continuous queries only after the WAL is initialized
- Close leveldb databases properly if we couldn't create a new Shard. See leveldb\_shard\_datastore\_test:131

## v0.5.4 [2014-04-02]

### Bugfixes

- [Issue #386](https://github.com/influxdb/influxdb/issues/386). Drop series should work with series containing dots
- [Issue #389](https://github.com/influxdb/influxdb/issues/389). Filtering shouldn't stop prematurely
- [Issue #341](https://github.com/influxdb/influxdb/issues/341). Make the number of shards that are queried in parallel configurable
- [Issue #394](https://github.com/influxdb/influxdb/issues/394). Support count(distinct) and count(DISTINCT)
- [Issue #362](https://github.com/influxdb/influxdb/issues/362). Limit should be enforced after aggregation

## v0.5.3 [2014-03-31]

### Bugfixes

- [Issue #378](https://github.com/influxdb/influxdb/issues/378). Indexing should return if there are no requests added since the last index
- [Issue #370](https://github.com/influxdb/influxdb/issues/370). Filtering and limit should be enforced on the shards
- [Issue #379](https://github.com/influxdb/influxdb/issues/379). Boolean columns should be usable in where clauses
- [Issue #381](https://github.com/influxdb/influxdb/issues/381). Should be able to do deletes as a cluster admin

## v0.5.2 [2014-03-28]

### Bugfixes

- [Issue #342](https://github.com/influxdb/influxdb/issues/342). Data resurrected after a server restart
- [Issue #367](https://github.com/influxdb/influxdb/issues/367). Influxdb won't start if the api port is commented out
- [Issue #355](https://github.com/influxdb/influxdb/issues/355). Return an error on wrong time strings
- [Issue #331](https://github.com/influxdb/influxdb/issues/331). Allow negative time values in the where clause
- [Issue #371](https://github.com/influxdb/influxdb/issues/371). Series index isn't deleted when the series is dropped
- [Issue #360](https://github.com/influxdb/influxdb/issues/360). Store and recover continuous queries

## v0.5.1 [2014-03-24]

### Bugfixes

- Revert the version of goraft due to a bug found in the latest version

## v0.5.0 [2014-03-24]

### Features

- [Issue #293](https://github.com/influxdb/influxdb/pull/293). Implement a Graphite listener

### Bugfixes

- [Issue #340](https://github.com/influxdb/influxdb/issues/340). Writing many requests while replaying seems to cause commits out of order

## v0.5.0-rc.6 [2014-03-20]

### Bugfixes

- Increase raft election timeout to avoid unnecessary re-elections
- Sort points before writing them to avoid an explosion in the request number when the points are written randomly
- [Issue #335](https://github.com/influxdb/influxdb/issues/335). Fixes regexp for interpolating more than one column value in continuous queries
- [Issue #318](https://github.com/influxdb/influxdb/pull/318). Support EXPLAIN queries
- [Issue #333](https://github.com/influxdb/influxdb/pull/333). Fail when the password is too short or too long instead of passing it to the crypto library

## v0.5.0-rc.5 [2014-03-11]

### Bugfixes

- [Issue #312](https://github.com/influxdb/influxdb/issues/312). WAL should wait for server id to be set before recovering
- [Issue #301](https://github.com/influxdb/influxdb/issues/301). Use ref counting to guard against race conditions in the shard cache
- [Issue #319](https://github.com/influxdb/influxdb/issues/319). Propagate engine creation error correctly to the user
- [Issue #316](https://github.com/influxdb/influxdb/issues/316). Make sure we don't starve goroutines if we get an access denied error from one of the shards
- [Issue #306](https://github.com/influxdb/influxdb/issues/306). Deleting/Dropping database takes a lot of memory
- [Issue #302](https://github.com/influxdb/influxdb/issues/302). Should be able to set negative timestamps on points
- [Issue #327](https://github.com/influxdb/influxdb/issues/327). Make delete queries not use WAL. This addresses #315, #317 and #314
- [Issue #321](https://github.com/influxdb/influxdb/issues/321). Make sure we split points on shards properly

## v0.5.0-rc.4 [2014-03-07]

### Bugfixes

- [Issue #298](https://github.com/influxdb/influxdb/issues/298). Fix limit when querying multiple shards
- [Issue #305](https://github.com/influxdb/influxdb/issues/305). Shard ids not unique after restart
- [Issue #309](https://github.com/influxdb/influxdb/issues/309). Don't relog the requests on the remote server
- Fix a few bugs in the WAL and refactor the way it works (this requires purging the WAL from previous rc)

## v0.5.0-rc.3 [2014-03-03]

### Bugfixes

- [Issue #69](https://github.com/influxdb/influxdb/issues/69). Support column aliases
- [Issue #287](https://github.com/influxdb/influxdb/issues/287). Make the lru cache size configurable
- [Issue #38](https://github.com/influxdb/influxdb/issues/38). Fix a memory leak discussed in this story
- [Issue #286](https://github.com/influxdb/influxdb/issues/286). Make the number of open shards configurable
- Make LevelDB use the max open files configuration option.

## v0.5.0-rc.2 [2014-02-27]

### Bugfixes

- [Issue #274](https://github.com/influxdb/influxdb/issues/274). Crash after restart
- [Issue #277](https://github.com/influxdb/influxdb/issues/277). Ensure duplicate shards won't be created
- [Issue #279](https://github.com/influxdb/influxdb/issues/279). Limits not working on regex queries
- [Issue #281](https://github.com/influxdb/influxdb/issues/281). `./influxdb -v` should print the sha when building from source
- [Issue #283](https://github.com/influxdb/influxdb/issues/283). Dropping shard and restart in cluster causes panic.
- [Issue #288](https://github.com/influxdb/influxdb/issues/288). Sequence numbers should be unique per server id

## v0.5.0-rc.1 [2014-02-25]

### Bugfixes

- Ensure large deletes don't take too much memory
- [Issue #240](https://github.com/influxdb/influxdb/pull/240). Unable to query against columns with `.` in the name.
- [Issue #250](https://github.com/influxdb/influxdb/pull/250). Different result between normal and continuous query with "group by" clause
- [Issue #216](https://github.com/influxdb/influxdb/pull/216). Results with no points should exclude columns and points

### Features

- [Issue #243](https://github.com/influxdb/influxdb/issues/243). Should have endpoint to GET a user's attributes.
- [Issue #269](https://github.com/influxdb/influxdb/pull/269), [Issue #65](https://github.com/influxdb/influxdb/issues/65). New clustering architecture (see docs), with the side effect that queries can be distributed between multiple shards
- [Issue #164](https://github.com/influxdb/influxdb/pull/269), [Issue #103](https://github.com/influxdb/influxdb/pull/269), [Issue #166](https://github.com/influxdb/influxdb/pull/269), [Issue #165](https://github.com/influxdb/influxdb/pull/269), [Issue #132](https://github.com/influxdb/influxdb/pull/269). Make request log a log file instead of leveldb with recovery on startup

### Deprecated

- [Issue #189](https://github.com/influxdb/influxdb/issues/189). `/cluster_admins` and `/db/:db/users` return usernames in a `name` key instead of `username` key.
- [Issue #216](https://github.com/influxdb/influxdb/pull/216). Results with no points should exclude columns and points

## v0.4.4 [2014-02-05]

### Features

- Make the leveldb max open files configurable in the toml file

## v0.4.3 [2014-01-31]

### Bugfixes

- [Issue #225](https://github.com/influxdb/influxdb/issues/225). Remove a hard limit on the points returned by the datastore
- [Issue #223](https://github.com/influxdb/influxdb/issues/223). Null values caused count(distinct()) to panic
- [Issue #224](https://github.com/influxdb/influxdb/issues/224). Null values broke replication due to protobuf limitation

## v0.4.1 [2014-01-30]

### Features

- [Issue #193](https://github.com/influxdb/influxdb/issues/193). Allow logging to stdout. Thanks @schmurfy
- [Issue #190](https://github.com/influxdb/influxdb/pull/190). Add support for SSL.
- [Issue #194](https://github.com/influxdb/influxdb/pull/194). Should be able to disable Admin interface.

### Bugfixes

- [Issue #33](https://github.com/influxdb/influxdb/issues/33). Don't call WriteHeader more than once per request
- [Issue #195](https://github.com/influxdb/influxdb/issues/195). Allow the bind address to be configurable. Thanks @schmurfy.
- [Issue #199](https://github.com/influxdb/influxdb/issues/199). Make the test timeout configurable
- [Issue #200](https://github.com/influxdb/influxdb/issues/200). Selecting `time` or `sequence_number` silently fails
- [Issue #215](https://github.com/influxdb/influxdb/pull/215). Server fails to start up after Raft log compaction and restart.

## v0.4.0 [2014-01-17]

### Features

- [Issue #86](https://github.com/influxdb/influxdb/issues/86). Support arithmetic expressions in select clause
- [Issue #92](https://github.com/influxdb/influxdb/issues/92). Change '==' to '=' and '!=' to '<>'
- [Issue #88](https://github.com/influxdb/influxdb/issues/88). Support datetime strings
- [Issue #64](https://github.com/influxdb/influxdb/issues/64). Shard writes and queries across cluster with replay for briefly downed nodes (< 24 hrs)
- [Issue #78](https://github.com/influxdb/influxdb/issues/78). Sequence numbers persist across restarts so they're not reused
- [Issue #102](https://github.com/influxdb/influxdb/issues/102). Support expressions in where condition
- [Issue #101](https://github.com/influxdb/influxdb/issues/101). Support expressions in aggregates
- [Issue #62](https://github.com/influxdb/influxdb/issues/62). Support updating and deleting column values
- [Issue #96](https://github.com/influxdb/influxdb/issues/96). Replicate deletes in a cluster
- [Issue #94](https://github.com/influxdb/influxdb/issues/94). Delete queries
delete queries -- [Issue #116](https://github.com/influxdb/influxdb/issues/116). Use proper logging -- [Issue #40](https://github.com/influxdb/influxdb/issues/40). Use TOML instead of JSON in the config file -- [Issue #99](https://github.com/influxdb/influxdb/issues/99). Support list series in the query language -- [Issue #149](https://github.com/influxdb/influxdb/issues/149). Cluster admins should be able to perform reads and writes. -- [Issue #108](https://github.com/influxdb/influxdb/issues/108). Querying one point using `time =` -- [Issue #114](https://github.com/influxdb/influxdb/issues/114). Servers should periodically check that they're consistent. -- [Issue #93](https://github.com/influxdb/influxdb/issues/93). Should be able to drop a time series -- [Issue #177](https://github.com/influxdb/influxdb/issues/177). Support drop series in the query language. -- [Issue #184](https://github.com/influxdb/influxdb/issues/184). Implement Raft log compaction. -- [Issue #153](https://github.com/influxdb/influxdb/issues/153). Implement continuous queries - -### Bugfixes - -- [Issue #90](https://github.com/influxdb/influxdb/issues/90). Group by multiple columns panic -- [Issue #89](https://github.com/influxdb/influxdb/issues/89). 'Group by' combined with 'where' not working -- [Issue #106](https://github.com/influxdb/influxdb/issues/106). Don't panic if we only see one point and can't calculate derivative -- [Issue #105](https://github.com/influxdb/influxdb/issues/105). Panic when using a where clause that references columns with null values -- [Issue #61](https://github.com/influxdb/influxdb/issues/61). Remove default limits from queries -- [Issue #118](https://github.com/influxdb/influxdb/issues/118). Make column names starting with '_' legal -- [Issue #121](https://github.com/influxdb/influxdb/issues/121). Don't fall back to the cluster admin auth if the db user auth fails -- [Issue #127](https://github.com/influxdb/influxdb/issues/127). Return error on delete queries with a where condition that doesn't have time -- [Issue #117](https://github.com/influxdb/influxdb/issues/117). Fill empty groups with default values -- [Issue #150](https://github.com/influxdb/influxdb/pull/150). Fix parser for when multiple divisions look like a regex. -- [Issue #158](https://github.com/influxdb/influxdb/issues/158). Logged deletes should be stored with the time range if missing. -- [Issue #136](https://github.com/influxdb/influxdb/issues/136). Make sure writes are replicated in order to avoid triggering replays -- [Issue #145](https://github.com/influxdb/influxdb/issues/145). Server fails to join cluster if all start at the same time. -- [Issue #176](https://github.com/influxdb/influxdb/issues/176). Drop database should take effect on all nodes -- [Issue #180](https://github.com/influxdb/influxdb/issues/180). Column names not returned when running multi-node cluster and writing more than one point. -- [Issue #182](https://github.com/influxdb/influxdb/issues/182). Queries with invalid limit clause crash the server - -### Deprecated - -- deprecate '==' and '!=' in favor of '=' and '<>', respectively -- deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint -- deprecate `username` field for a more consistent `name` field in `/db/:db/users` and `/cluster_admins` -- deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should be used to update user flags, password, etc. -- Querying for column names that don't exist no longer throws an error. 
- -## v0.3.2 - -## Features - -- [Issue #82](https://github.com/influxdb/influxdb/issues/82). Add endpoint for listing available admin interfaces. -- [Issue #80](https://github.com/influxdb/influxdb/issues/80). Support durations when specifying start and end time -- [Issue #81](https://github.com/influxdb/influxdb/issues/81). Add support for IN - -## Bugfixes - -- [Issue #75](https://github.com/influxdb/influxdb/issues/75). Don't allow time series names that start with underscore -- [Issue #85](https://github.com/influxdb/influxdb/issues/85). Non-existing columns exist after they have been queried before - -## v0.3.0 - -## Features - -- [Issue #51](https://github.com/influxdb/influxdb/issues/51). Implement first and last aggregates -- [Issue #35](https://github.com/influxdb/influxdb/issues/35). Support table aliases in Join Queries -- [Issue #71](https://github.com/influxdb/influxdb/issues/71). Add WillReturnSingleSeries to the Query -- [Issue #61](https://github.com/influxdb/influxdb/issues/61). Limit should default to 10k -- [Issue #59](https://github.com/influxdb/influxdb/issues/59). Add histogram aggregate function - -## Bugfixes - -- Fix join and merges when the query is a descending order query -- [Issue #57](https://github.com/influxdb/influxdb/issues/57). Don't panic when type of time != float -- [Issue #63](https://github.com/influxdb/influxdb/issues/63). Aggregate queries should not have a sequence_number column - -## v0.2.0 - -### Features - -- [Issue #37](https://github.com/influxdb/influxdb/issues/37). Support the negation of the regex matcher !~ -- [Issue #47](https://github.com/influxdb/influxdb/issues/47). Spill out query and database detail at the time of bug report - -### Bugfixes - -- [Issue #36](https://github.com/influxdb/influxdb/issues/36). The regex operator should be =~ not ~= -- [Issue #39](https://github.com/influxdb/influxdb/issues/39). Return proper content types from the http api -- [Issue #42](https://github.com/influxdb/influxdb/issues/42). Make the api consistent with the docs -- [Issue #41](https://github.com/influxdb/influxdb/issues/41). Table/Points not deleted when database is dropped -- [Issue #45](https://github.com/influxdb/influxdb/issues/45). Aggregation shouldn't mess up the order of the points -- [Issue #44](https://github.com/influxdb/influxdb/issues/44). Fix crashes on RHEL 5.9 -- [Issue #34](https://github.com/influxdb/influxdb/issues/34). Ascending order always returns null for columns that have a null value -- [Issue #55](https://github.com/influxdb/influxdb/issues/55). Limit should limit the points that match the Where clause -- [Issue #53](https://github.com/influxdb/influxdb/issues/53). Writing null values via HTTP API fails - -### Deprecated - -- Preparing to deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint -- Preparing to deprecate `username` field for a more consistent `name` field in `/db/:db/users` -- Preparing to deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should be used to update user flags, password, etc. - -## v0.1.0 - -### Features - -- [Issue #29](https://github.com/influxdb/influxdb/issues/29). Semicolon is now optional in queries -- [Issue #31](https://github.com/influxdb/influxdb/issues/31). Support Basic Auth as well as query params for authentication. - -### Bugfixes - -- Don't allow creating users with empty username -- [Issue #22](https://github.com/influxdb/influxdb/issues/22). 
Don't set goroot if it was set -- [Issue #25](https://github.com/influxdb/influxdb/issues/25). Fix queries that use the median aggregator -- [Issue #26](https://github.com/influxdb/influxdb/issues/26). Default log and db directories should be in /opt/influxdb/shared/data -- [Issue #27](https://github.com/influxdb/influxdb/issues/27). Group by should not blow up if one of the columns in group by has null values -- [Issue #30](https://github.com/influxdb/influxdb/issues/30). Column indexes/names getting off somehow -- [Issue #32](https://github.com/influxdb/influxdb/issues/32). Fix many typos in the codebase. Thanks @pborreli - -## v0.0.9 - -#### Features - -- Add stddev(...) support -- Better docs, thanks @auxesis and @d-snp. - -#### Bugfixes - -- Set PYTHONPATH and CC appropriately on mac os x. -- [Issue #18](https://github.com/influxdb/influxdb/issues/18). Fix 386 debian and redhat packages -- [Issue #23](https://github.com/influxdb/influxdb/issues/23). Fix the init scripts on redhat - -## v0.0.8 - -#### Features - -- Add a way to reset the root password from the command line. -- Add distinct(..) and derivative(...) support -- Print test coverage if running go1.2 - -#### Bugfixes - -- Fix the default admin site path in the .deb and .rpm packages. -- Fix the configuration filename in the .tar.gz package. - -## v0.0.7 - -#### Features - -- Include the admin site in the repo to make it easier for newcomers. - -## v0.0.6 - -#### Features - -- Add count(distinct(..)) support - -#### Bugfixes - -- Reuse levigo read/write options. - -## v0.0.5 - -#### Features - -- Cache passwords in memory to speed up password verification -- Add MERGE and INNER JOIN support - -#### Bugfixes - -- All columns should be returned if `select *` was used -- Read/Write benchmarks - -## v0.0.2 - -#### Features - -- Add an admin UI -- Deb and RPM packages - -#### Bugfixes - -- Fix some nil pointer dereferences -- Cleanup the aggregators implementation - -## v0.0.1 [2013-10-22] - - * Initial Release diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/CONTRIBUTING.md deleted file mode 100644 index 56d188693..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/CONTRIBUTING.md +++ /dev/null @@ -1,231 +0,0 @@ -Contributing to InfluxDB -======================== - -Bug reports ---------------- -Before you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed. If you file an issue, please include the following. -* Full details of your operating system (or distribution) e.g. 64-bit Ubuntu 14.04. -* The version of InfluxDB you are running -* Whether you installed it using a pre-built package, or built it from source. -* A small test case, if applicable, that demonstrates the issue. - -Remember the golden rule of bug reports: **The easier you make it for us to reproduce the problem, the faster it will get fixed.** -If you have never written a bug report before, or if you want to brush up on your bug reporting skills, we recommend reading [Simon Tatham's essay "How to Report Bugs Effectively."](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html) - -Test cases should be in the form of `curl` commands. 
For example: -``` -# create database -curl -G http://localhost:8086/query --data-urlencode "q=CREATE DATABASE mydb" - -# create retention policy -curl -G http://localhost:8086/query --data-urlencode "q=CREATE RETENTION POLICY myrp ON mydb DURATION 365d REPLICATION 1 DEFAULT" - -# write data -curl -X POST http://localhost:8086/write --data-urlencode "db=mydb" --data-binary "cpu,region=useast,host=server_1,service=redis value=61" - -# Delete a Measurement -curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=DROP MEASUREMENT cpu' - -# Query the Measurement -# Bug: expected it to return no data, but data comes back. -curl -G http://localhost:8086/query --data-urlencode 'db=mydb' --data-urlencode 'q=SELECT * from cpu' -``` -**If you don't include a clear test case like this, your issue may not be investigated, and may even be closed**. If writing the data is too difficult, please zip up your data directory and include a link to it in your bug report. - -Please note that issues are *not the place to file general questions* such as "how do I use collectd with InfluxDB?" Questions of this nature should be sent to the [Google Group](https://groups.google.com/forum/#!forum/influxdb), not filed as issues. Issues like this will be closed. - -Feature requests ---------------- -We really like to receive feature requests, as it helps us prioritize our work. Please be clear about your requirements, as incomplete feature requests may simply be closed if we don't understand what you would like to see added to InfluxDB. - -Contributing to the source code ---------------- - -InfluxDB follows the standard Go project structure. This means that all -your Go development is done in `$GOPATH/src`. GOPATH can be any -directory under which InfluxDB and all its dependencies will be -cloned. For more details on the recommended Go project structure, see -[How to Write Go Code](http://golang.org/doc/code.html) and -[Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/), or you can just follow -the steps below. - -Submitting a pull request ------------- -To submit a pull request you should fork the InfluxDB repository, and make your change on a feature branch of your fork. Then generate a pull request from your branch against *master* of the InfluxDB repository. Include in your pull request details of your change -- the why *and* the how -- as well as the testing you performed. Also, be sure to run the test suite with your change in place. Changes that cause tests to fail cannot be merged. - -There will usually be some back and forth as we finalize the change, but once that completes it may be merged. - -To assist in review for the PR, please add the following to your pull request comment: - -```md -- [ ] CHANGELOG.md updated -- [ ] Rebased/mergable -- [ ] Tests pass -- [ ] Sign [CLA](http://influxdb.com/community/cla.html) (if not already signed) -``` - -Use of third-party packages ------------- -A third-party package is defined as one that is not part of the standard Go distribution. Generally speaking we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessary. We'll often write a little bit of code rather than pull in a third-party package. Of course, we do use some third-party packages -- most importantly we use [BoltDB](https://github.com/boltdb/bolt) as the storage engine. 
So to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use. - -For rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/). - -Signing the CLA ---------------- - -If you are going to be contributing back to InfluxDB please take a -second to sign our CLA, which can be found -[on our website](http://influxdb.com/community/cla.html). - -Installing Go ------------- -InfluxDB requires Go 1.4 or greater. - -At InfluxDB we find gvm, a Go version manager, useful for installing Go. For instructions -on how to install it see [the gvm page on github](https://github.com/moovweb/gvm). - -After installing gvm you can install and set the default go version by -running the following: - - gvm install go1.4 - gvm use go1.4 --default - -Revision Control Systems ------- -Go has the ability to import remote packages via revision control systems with the `go get` command. To ensure that you can retrieve any remote package, be sure to install the following RCS software to your system. -Currently the project only depends on `git` and `mercurial`. - -* [Install Git](http://git-scm.com/book/en/Getting-Started-Installing-Git) -* [Install Mercurial](http://mercurial.selenic.com/wiki/Download) - -Project structure ----------------- -First you need to setup the project structure: - - export GOPATH=$HOME/gocodez - mkdir -p $GOPATH/src/github.com/influxdb - cd $GOPATH/src/github.com/influxdb - git clone git@github.com:influxdb/influxdb - -You can add the line `export GOPATH=$HOME/gocodez` to your bash/zsh -file to be set for every shell instead of having to manually run it -every time. - -We have a pre-commit hook to make sure code is formatted properly -and vetted before you commit any changes. We strongly recommend using the pre-commit hook to guard against accidentally committing unformatted -code. To use the pre-commit hook, run the following: - - cd $GOPATH/src/github.com/influxdb/influxdb - cp .hooks/pre-commit .git/hooks/ - -In case the commit is rejected because it's not formatted, you can run -the following to format the code: - -``` -go fmt ./... -go vet ./... -``` - -To install go vet, run the following command: -``` -go get golang.org/x/tools/cmd/vet -``` - -NOTE: If you have not installed mercurial, the above command will fail. See [Revision Control Systems](#revision-control-systems) above. - -For more information on `go vet`, [read the GoDoc](https://godoc.org/golang.org/x/tools/cmd/vet). - -Build and Test ------ - -Make sure you have Go installed and the project structure as shown above. To then build the project, execute the following commands: - -```bash -cd $GOPATH/src/github.com/influxdb -go get -u -f -t ./... -go build ./... -``` - -To then install the binaries, run the following command. They can be found in `$GOPATH/bin`. Please note that the InfluxDB binary is named `influxd`, not `influxdb`. - -```bash -go install ./... -``` - -To set the version and commit flags during the build, pass the following to the build command: - -```bash --ldflags="-X main.version $VERSION -X main.branch $BRANCH -X main.commit $COMMIT" -``` - -where `$VERSION` is the version, `$BRANCH` is the branch, and `$COMMIT` is the git commit hash. - -To run the tests, execute the following command: - -```bash -cd $GOPATH/src/github.com/influxdb/influxdb -go test -v ./... - -# run tests that match some pattern -go test -run=TestDatabase . 
-v - -# run tests and show coverage -go test -coverprofile /tmp/cover . && go tool cover -html /tmp/cover -``` - -To install go cover, run the following command: -``` -go get golang.org/x/tools/cmd/cover -``` - -Generated Google Protobuf code ------------------ -Most changes to the source do not require that the generated protocol buffer code be changed. But if you need to modify the protocol buffer code, you'll first need to install the protocol buffers toolchain. - -First install the [protocol buffer compiler](https://developers.google.com/protocol-buffers/) 2.6.1 or later for your OS: - -Then install the go plugins: - -```bash -go get github.com/gogo/protobuf/proto -go get github.com/gogo/protobuf/protoc-gen-gogo -go get github.com/gogo/protobuf/gogoproto -``` - -Finally, run `go generate` after updating any `*.proto` file: - -```bash -go generate ./... -``` -**Troubleshooting** - -If generating the protobuf code is failing for you, check each of the following: - * Ensure the protobuf library can be found. Make sure that `LD_LIBRARY_PATH` includes the directory in which the library `libprotoc.so` has been installed. - * Ensure the command `protoc-gen-gogo`, found in `GOPATH/bin`, is on your path. This can be done by adding `GOPATH/bin` to `PATH`. - -Profiling ------ -When troubleshooting problems with CPU or memory, the Go toolchain can be helpful. You can start InfluxDB with CPU or memory profiling turned on. For example: - -```sh -# start influx with profiling -./influxd -cpuprofile influxd.prof -# run queries, writes, whatever you're testing -# open up pprof -go tool pprof influxd influxd.prof -# once inside run "web", opens up browser with the CPU graph -# can also run "web " to zoom in. Or "list " to see specific lines -``` - -Continuous Integration testing ------ -InfluxDB uses CircleCI for continuous integration testing. To see how the code is built and tested, check out [this file](https://github.com/influxdb/influxdb/blob/master/circle-test.sh). It closely follows the build and test process outlined above. You can see the exact version of Go InfluxDB uses for testing by consulting that file. - -Useful links ------------- -- [Useful techniques in Go](http://arslan.io/ten-useful-techniques-in-go) -- [Go in production](http://peter.bourgon.org/go-in-production/) -- [Principles of designing Go APIs with channels](https://inconshreveable.com/07-08-2014/principles-of-designing-go-apis-with-channels/) -- [Common mistakes in Golang](http://soryy.com/blog/2014/common-mistakes-with-go-lang/). Especially this section `Loops, Closures, and Local Variables` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/DOCKER.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/DOCKER.md deleted file mode 100644 index adb33a416..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/DOCKER.md +++ /dev/null @@ -1,44 +0,0 @@ -# Docker Setup -======================== - -This document describes how to build and run a minimal InfluxDB container under Docker. Currently, it has only been tested for local development and assumes that you have a working docker environment. - -## Building Image - -To build a docker image for InfluxDB from your current checkout, run the following: - -``` -$ ./build-docker.sh -``` - -This script uses the `golang:1.5` image to build a fully static binary of `influxd` and then adds it to a minimal `scratch` image. 
- -To build the image using a different version of go: - -``` -$ GO_VER=1.4.2 ./build-docker.sh -``` - -Available versions can be found [here](https://hub.docker.com/_/golang/). - -## Single Node Container - -This will start an interactive, single-node container that publishes the container's ports `8086` and `8088` to the host's ports `8086` and `8088` respectively. This is identical to starting `influxd` manually. - -``` -$ docker run -it -p 8086:8086 -p 8088:8088 influxdb -``` - -## Multi-Node Cluster - -This will create a simple 3-node cluster. The data is stored within the container and will be lost when the container is removed. This is only useful for test clusters. - -The `HOST_IP` env variable should be your host IP if running under linux or the virtualbox VM IP if running under OSX. On OSX, this would be something like: `$(docker-machine ip dev)` or `$(boot2docker ip)` depending on which docker tool you are using. - -``` -$ export HOST_IP= -$ docker run -it -p 8086:8086 -p 8088:8088 influxdb -hostname $HOST_IP:8088 -$ docker run -it -p 8186:8086 -p 8188:8088 influxdb -hostname $HOST_IP:8188 -join $HOST_IP:8088 -$ docker run -it -p 8286:8086 -p 8288:8088 influxdb -hostname $HOST_IP:8288 -join $HOST_IP:8088 -``` - diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile b/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile deleted file mode 100644 index d30cd300d..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -FROM busybox:ubuntu-14.04 - -MAINTAINER Jason Wilder "" - -# admin, http, udp, cluster, graphite, opentsdb, collectd -EXPOSE 8083 8086 8086/udp 8088 2003 4242 25826 - -WORKDIR /app - -# copy binary into image -COPY influxd /app/ - -# Add influxd to the PATH -ENV PATH=/app:$PATH - -# Generate a default config -RUN influxd config > /etc/influxdb.toml - -# Use /data for all disk storage -RUN sed -i 's/dir = "\/.*influxdb/dir = "\/data/' /etc/influxdb.toml - -VOLUME ["/data"] - -ENTRYPOINT ["influxd", "--config", "/etc/influxdb.toml"] diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE b/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE deleted file mode 100644 index d50222706..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013-2015 Errplane Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/QUERIES.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/QUERIES.md deleted file mode 100644 index 8491aa7ec..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/QUERIES.md +++ /dev/null @@ -1,180 +0,0 @@ -The top level name is called a measurement. These names can contain any characters. Then there are field names, field values, tag keys and tag values, which can also contain any characters. However, if the measurement, field, or tag contains any character other than [A-Z,a-z,0-9,_], or if it starts with a digit, it must be double-quoted. Therefore anywhere a measurement name, field name, field value, tag name, or tag value appears it should be wrapped in double quotes. - -# Databases & retention policies - -```sql --- create a database -CREATE DATABASE <name> - --- create a retention policy -CREATE RETENTION POLICY <rp-name> ON <db-name> DURATION <duration> REPLICATION <n> [DEFAULT] - --- alter retention policy -ALTER RETENTION POLICY <rp-name> ON <db-name> (DURATION <duration> | REPLICATION <n> | DEFAULT)+ - --- drop a database -DROP DATABASE <name> - --- drop a retention policy -DROP RETENTION POLICY <rp-name> ON <db-name> -``` -where `<duration>` is either `INF` for infinite retention, or an integer followed by the desired unit of time: u,ms,s,m,h,d,w for microseconds, milliseconds, seconds, minutes, hours, days, or weeks, respectively. `<n>` must be an integer. - -If present, `DEFAULT` sets the retention policy as the default retention policy for writes and reads. - -# Users and permissions - -```sql --- create user -CREATE USER <user-name> WITH PASSWORD '<password>' - --- grant privilege on a database -GRANT <privilege> ON <db> TO <user> - --- grant cluster admin privileges -GRANT ALL [PRIVILEGES] TO <user> - --- revoke privilege -REVOKE <privilege> ON <db> FROM <user> - --- revoke all privileges for a DB -REVOKE ALL [PRIVILEGES] ON <db> FROM <user> - --- revoke all privileges including cluster admin -REVOKE ALL [PRIVILEGES] FROM <user> - --- combine db creation with privilege assignment (user must already exist) -CREATE DATABASE <db> GRANT <privilege> TO <user> -CREATE DATABASE <db> REVOKE <privilege> FROM <user> - --- delete a user -DROP USER <user-name> - - -``` -where `<privilege> := READ | WRITE | ALL`. - -Authentication must be enabled in the influxdb.conf file for user permissions to be in effect. - -By default, newly created users have no privileges to any databases. - -Cluster administration privileges automatically grant full read and write permissions to all databases, regardless of subsequent database-specific privilege revocation statements. - -# Select - -```sql -SELECT mean(value) from cpu WHERE host = 'serverA' AND time > now() - 4h GROUP BY time(5m) - -SELECT mean(value) from cpu WHERE time > now() - 4h GROUP BY time(5m), region -``` - -## Group By - -# Delete - -# Series - -## Destroy - -```sql -DROP MEASUREMENT <name> -DROP MEASUREMENT cpu WHERE region = 'uswest' -``` - -## Show - -Show series queries are for pulling out individual series from measurement names and tag data. They're useful for discovery. - -```sql --- show all databases -SHOW DATABASES - --- show measurement names -SHOW MEASUREMENTS -SHOW MEASUREMENTS LIMIT 15 -SHOW MEASUREMENTS LIMIT 10 OFFSET 40 -SHOW MEASUREMENTS WHERE service = 'redis' --- LIMIT and OFFSET can be applied to any of the SHOW type queries - --- show all series across all measurements/tagsets -SHOW SERIES - --- get a show of all series for any measurements where tag key region = tag value 'uswest' -SHOW SERIES WHERE region = 'uswest' - -SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 - --- returns rows 100 - 109 in the result. In the case of SHOW SERIES, which returns --- series split into measurements. 
Each series counts as a row. So you could see only a --- single measurement returned, but 10 series within it. -SHOW SERIES FROM cpu_load WHERE region = 'uswest' LIMIT 10 OFFSET 100 - --- show all retention policies on a database -SHOW RETENTION POLICIES ON mydb - --- get a show of all tag keys across all measurements -SHOW TAG KEYS - --- show all the tag keys for a given measurement -SHOW TAG KEYS FROM cpu -SHOW TAG KEYS FROM temperature, wind_speed - --- show all the tag values. note that a single WHERE TAG KEY = '...' clause is required -SHOW TAG VALUES WITH TAG KEY = 'region' -SHOW TAG VALUES FROM cpu WHERE region = 'uswest' WITH TAG KEY = 'host' - --- and you can do stuff against fields -SHOW FIELD KEYS FROM cpu - --- but you can't do this -SHOW FIELD VALUES --- we don't index field values, so this query should be invalid. - --- show all users -SHOW USERS -``` - -Note that `FROM` and `WHERE` are optional clauses in most of the show series queries. - -And the show series output looks like this: - -```json -[ - { - "name": "cpu", - "columns": ["id", "region", "host"], - "values": [ - 1, "uswest", "servera", - 2, "uswest", "serverb" - ] - }, - { - "name": "response_time", - "columns": ["id", "application", "host"], - "values": [ - 3, "myRailsApp", "servera" - ] - } -] -``` - -# Continuous Queries - -Continuous queries are going to be inspired by MySQL `TRIGGER` syntax: - -http://dev.mysql.com/doc/refman/5.0/en/trigger-syntax.html - -Instead of having automatically-assigned ids, named continuous queries allow for some level of duplication prevention, -particularly in the case where creation is scripted. - -## Create - - CREATE CONTINUOUS QUERY <name> AS SELECT ... FROM ... - -## Destroy - - DROP CONTINUOUS QUERY <name> - -## List - - SHOW CONTINUOUS QUERIES diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/README.md deleted file mode 100644 index a13be4127..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# InfluxDB [![Circle CI](https://circleci.com/gh/influxdb/influxdb/tree/master.svg?style=svg)](https://circleci.com/gh/influxdb/influxdb/tree/master) - -## An Open-Source, Distributed, Time Series Database - -> InfluxDB v0.9.0 is now out. Going forward, the 0.9.x series of releases will not make breaking API changes or breaking changes to the underlying data storage. However, 0.9.0 clustering should be considered an alpha release. - -InfluxDB is an open source **distributed time series database** with -**no external dependencies**. It's useful for recording metrics, -events, and performing analytics. - -## Features - -* Built-in [HTTP API](http://influxdb.com/docs/v0.9/concepts/reading_and_writing_data.html) so you don't have to write any server side code to get up and running. -* Data can be tagged, allowing very flexible querying. -* SQL-like query language. -* Clustering is supported out of the box, so that you can scale horizontally to handle your data. -* Simple to install and manage, and fast to get data in and out. -* It aims to answer queries in real-time. That means every data point is - indexed as it comes in and is immediately available in queries that - should return in < 100ms. 
- -## Getting Started -*The following directions apply only to the 0.9.0 release or building from the source on master.* - -### Building - -You don't need to build the project to use it - you can use any of our -[pre-built packages](http://influxdb.com/download/index.html) to install InfluxDB. That's -the recommended way to get it running. However, if you want to contribute to the core of InfluxDB, you'll need to build. -For those adventurous enough, you can -[follow along on our docs](http://github.com/influxdb/influxdb/blob/master/CONTRIBUTING.md). - -### Starting InfluxDB -* `service influxdb start` if you have installed InfluxDB using an official Debian or RPM package. -* `$GOPATH/bin/influxd` if you have built InfluxDB from source. - -### Creating your first database - -``` -curl -G 'http://localhost:8086/query' --data-urlencode "q=CREATE DATABASE mydb" -``` - -### Insert some data -``` -curl -XPOST 'http://localhost:8086/write?db=mydb' \ --d 'cpu,host=server01,region=uswest load=42 1434055562000000000' - -curl -XPOST 'http://localhost:8086/write?db=mydb' \ --d 'cpu,host=server02,region=uswest load=78 1434055562000000000' - -curl -XPOST 'http://localhost:8086/write?db=mydb' \ --d 'cpu,host=server03,region=useast load=15.4 1434055562000000000' -``` - -### Query for the data -``` -curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ ---data-urlencode "q=SELECT * FROM cpu WHERE host='server01' AND time < now() - 1d" -``` - -### Analyze the data -``` -curl -G http://localhost:8086/query?pretty=true --data-urlencode "db=mydb" \ ---data-urlencode "q=SELECT mean(load) FROM cpu WHERE region='uswest'" -``` - -## Helpful Links - -* Understand the [design goals and motivations of the project](http://influxdb.com/docs/v0.9/introduction/overview.html). -* Follow the [getting started guide](http://influxdb.com/docs/v0.9/introduction/getting_started.html) to find out how to install InfluxDB, start writing more data, and issue more queries in just a few minutes. -* See the [HTTP API documentation to start writing a library for your favorite language](http://influxdb.com/docs/v0.9/concepts/reading_and_writing_data.html). 
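
As a minimal sketch of what a client library built on this HTTP API might look like in Go: only the `/write` and `/query` endpoints, the database name `mydb`, and the line-protocol payload are taken from the curl examples above; the program structure, hard-coded address, and panic-based error handling are illustrative assumptions, not part of InfluxDB.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	// Assumed local InfluxDB listening on its default HTTP port, as in the curl examples above.
	base := "http://localhost:8086"

	// Write a single point in line protocol to /write (same payload as the first curl example).
	point := "cpu,host=server01,region=uswest load=42 1434055562000000000"
	resp, err := http.Post(base+"/write?db=mydb", "text/plain", strings.NewReader(point))
	if err != nil {
		panic(err) // illustrative only; a real library would return the error
	}
	resp.Body.Close()

	// Read the point back through /query.
	params := url.Values{}
	params.Set("db", "mydb")
	params.Set("q", "SELECT * FROM cpu WHERE host='server01'")
	resp, err = http.Get(base + "/query?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The body is the same JSON document the curl queries above print.
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```

A real client would also inspect the HTTP status code and the response body for errors rather than assuming every request succeeded.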
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/balancer.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/balancer.go deleted file mode 100644 index 25abbf6f1..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/balancer.go +++ /dev/null @@ -1,78 +0,0 @@ -package influxdb - -import ( - "math/rand" - - "github.com/influxdb/influxdb/meta" -) - -// Balancer represents a load-balancing algorithm for a set of nodes -type Balancer interface { - // Next returns the next Node according to the balancing method - // or nil if there are no nodes available - Next() *meta.NodeInfo -} - -type nodeBalancer struct { - nodes []meta.NodeInfo // data nodes to balance between - p int // current node index -} - -// NewNodeBalancer creates a shuffled, round-robin balancer so that -// multiple instances will return nodes in randomized order and -// each returned node will be repeated in a cycle -func NewNodeBalancer(nodes []meta.NodeInfo) Balancer { - // make a copy of the node slice so we can randomize it - // without affecting the original instance as well as ensure - // that each Balancer returns nodes in a different order - b := &nodeBalancer{} - - b.nodes = make([]meta.NodeInfo, len(nodes)) - copy(b.nodes, nodes) - - b.shuffle() - return b -} - -// shuffle randomizes the ordering of the balancer's available nodes -func (b *nodeBalancer) shuffle() { - for i := range b.nodes { - j := rand.Intn(i + 1) - b.nodes[i], b.nodes[j] = b.nodes[j], b.nodes[i] - } -} - -// online returns a slice of the nodes that are online -func (b *nodeBalancer) online() []meta.NodeInfo { - return b.nodes - // now := time.Now().UTC() - // up := []meta.NodeInfo{} - // for _, n := range b.nodes { - // if n.OfflineUntil.After(now) { - // continue - // } - // up = append(up, n) - // } - // return up -} - -// Next returns the next available node -func (b *nodeBalancer) Next() *meta.NodeInfo { - // only use online nodes - up := b.online() - - // no nodes online - if len(up) == 0 { - return nil - } - - // rollover back to the beginning - if b.p >= len(up) { - b.p = 0 - } - - d := &up[b.p] - b.p += 1 - - return d -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/balancer_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/balancer_test.go deleted file mode 100644 index ca1942c33..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/balancer_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package influxdb_test - -import ( - "fmt" - "testing" - - "github.com/influxdb/influxdb" - "github.com/influxdb/influxdb/meta" -) - -func NewNodes() []meta.NodeInfo { - var nodes []meta.NodeInfo - for i := 1; i <= 2; i++ { - nodes = append(nodes, meta.NodeInfo{ - ID: uint64(i), - Host: fmt.Sprintf("localhost:999%d", i), - }) - } - return nodes -} - -func TestBalancerEmptyNodes(t *testing.T) { - b := influxdb.NewNodeBalancer([]meta.NodeInfo{}) - got := b.Next() - if got != nil { - t.Errorf("expected nil, got %v", got) - } -} - -func TestBalancerUp(t *testing.T) { - nodes := NewNodes() - b := influxdb.NewNodeBalancer(nodes) - - // First node in randomized round-robin order - first := b.Next() - if first == nil { - t.Errorf("expected datanode, got %v", first) - } - - // Second node in randomized round-robin order - second := b.Next() - if second == nil { - t.Errorf("expected datanode, got %v", second) - } - - // Should never get the same node in order twice - if first.ID == second.ID { - t.Errorf("expected first != second. 
got %v = %v", first.ID, second.ID) - } -} - -/* -func TestBalancerDown(t *testing.T) { - nodes := NewNodes() - b := influxdb.NewNodeBalancer(nodes) - - nodes[0].Down() - - // First node in randomized round-robin order - first := b.Next() - if first == nil { - t.Errorf("expected datanode, got %v", first) - } - - // Second node should rollover to the first up node - second := b.Next() - if second == nil { - t.Errorf("expected datanode, got %v", second) - } - - // Healthy node should be returned each time - if first.ID != 2 && first.ID != second.ID { - t.Errorf("expected first != second. got %v = %v", first.ID, second.ID) - } -} -*/ - -/* -func TestBalancerBackUp(t *testing.T) { - nodes := newDataNodes() - b := influxdb.NewNodeBalancer(nodes) - - nodes[0].Down() - - for i := 0; i < 3; i++ { - got := b.Next() - if got == nil { - t.Errorf("expected datanode, got %v", got) - } - - if exp := uint64(2); got.ID != exp { - t.Errorf("wrong node id: exp %v, got %v", exp, got.ID) - } - } - - nodes[0].Up() - - // First node in randomized round-robin order - first := b.Next() - if first == nil { - t.Errorf("expected datanode, got %v", first) - } - - // Second node should rollover to the first up node - second := b.Next() - if second == nil { - t.Errorf("expected datanode, got %v", second) - } - - // Should get both nodes returned - if first.ID == second.ID { - t.Errorf("expected first != second. got %v = %v", first.ID, second.ID) - } -} -*/ diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/build-docker.sh b/Godeps/_workspace/src/github.com/influxdb/influxdb/build-docker.sh deleted file mode 100644 index 6f303201e..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/build-docker.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/sh -x -e - -GO_VER=${GO_VER:-1.5} - -docker run -it -v "$GOPATH":/gopath -v "$(pwd)":/app -e "GOPATH=/gopath" -w /app golang:$GO_VER sh -c 'CGO_ENABLED=0 go build -a --installsuffix cgo --ldflags="-s" -o influxd ./cmd/influxd' - -docker build -t influxdb . diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/circle-test.sh b/Godeps/_workspace/src/github.com/influxdb/influxdb/circle-test.sh deleted file mode 100644 index a53ac6487..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/circle-test.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash -# -# This is the InfluxDB CircleCI test script. Using this script allows total control -# of the environment in which the build and test is run, and matches the official -# build process for InfluxDB. - -BUILD_DIR=$HOME/influxdb-build -GO_VERSION=go1.4.2 -PARALLELISM="-parallel 256" -TIMEOUT="-timeout 480s" - -# Executes the given statement, and exits if the command returns a non-zero code. -function exit_if_fail { - command=$@ - echo "Executing '$command'" - $command - rc=$? - if [ $rc -ne 0 ]; then - echo "'$command' returned $rc." - exit $rc - fi -} - -source $HOME/.gvm/scripts/gvm -exit_if_fail gvm use $GO_VERSION - -# Set up the build directory, and then GOPATH. -exit_if_fail mkdir $BUILD_DIR -export GOPATH=$BUILD_DIR -exit_if_fail mkdir -p $GOPATH/src/github.com/influxdb - -# Dump some test config to the log. -echo "Test configuration" -echo "========================================" -echo "\$HOME: $HOME" -echo "\$GOPATH: $GOPATH" -echo "\$CIRCLE_BRANCH: $CIRCLE_BRANCH" - -# Move the checked-out source to a better location. 
-exit_if_fail mv $HOME/influxdb $GOPATH/src/github.com/influxdb -exit_if_fail cd $GOPATH/src/github.com/influxdb/influxdb -exit_if_fail git branch --set-upstream-to=origin/$CIRCLE_BRANCH $CIRCLE_BRANCH - -# Install the code. -exit_if_fail cd $GOPATH/src/github.com/influxdb/influxdb -exit_if_fail go get -t -d -v ./... -exit_if_fail git checkout $CIRCLE_BRANCH # 'go get' switches to master. Who knew? Switch back. -exit_if_fail go build -v ./... - -# Run the tests. -exit_if_fail go tool vet --composites=false . -case $CIRCLE_NODE_INDEX in - 0) - go test $PARALLELISM $TIMEOUT -v ./... 2>&1 | tee $CIRCLE_ARTIFACTS/test_logs.txt - rc=${PIPESTATUS[0]} - ;; - 1) - GORACE="halt_on_error=1" go test $PARALLELISM $TIMEOUT -v -race ./... 2>&1 | tee $CIRCLE_ARTIFACTS/test_logs_race.txt - rc=${PIPESTATUS[0]} - ;; -esac - -exit $rc diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/circle.yml b/Godeps/_workspace/src/github.com/influxdb/influxdb/circle.yml deleted file mode 100644 index 141e26fda..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/circle.yml +++ /dev/null @@ -1,12 +0,0 @@ -machine: - pre: - - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer) - - source $HOME/.gvm/scripts/gvm; gvm install go1.4.2 --binary - -dependencies: - override: - - echo "Dummy override, so no Circle dependencies execute" -test: - override: - - bash circle-test.sh: - parallel: true diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/client_pool.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/client_pool.go deleted file mode 100644 index fed7e18e0..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/client_pool.go +++ /dev/null @@ -1,57 +0,0 @@ -package cluster - -import ( - "net" - "sync" - - "gopkg.in/fatih/pool.v2" -) - -type clientPool struct { - mu sync.RWMutex - pool map[uint64]pool.Pool -} - -func newClientPool() *clientPool { - return &clientPool{ - pool: make(map[uint64]pool.Pool), - } -} - -func (c *clientPool) setPool(nodeID uint64, p pool.Pool) { - c.mu.Lock() - c.pool[nodeID] = p - c.mu.Unlock() -} - -func (c *clientPool) getPool(nodeID uint64) (pool.Pool, bool) { - c.mu.RLock() - p, ok := c.pool[nodeID] - c.mu.RUnlock() - return p, ok -} - -func (c *clientPool) size() int { - c.mu.RLock() - var size int - for _, p := range c.pool { - size += p.Len() - } - c.mu.RUnlock() - return size -} - -func (c *clientPool) conn(nodeID uint64) (net.Conn, error) { - c.mu.RLock() - conn, err := c.pool[nodeID].Get() - c.mu.RUnlock() - return conn, err -} - -func (c *clientPool) close() { - c.mu.Lock() - for _, p := range c.pool { - p.Close() - } - c.mu.Unlock() -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config.go deleted file mode 100644 index 3a67b32d0..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config.go +++ /dev/null @@ -1,35 +0,0 @@ -package cluster - -import ( - "time" - - "github.com/influxdb/influxdb/toml" -) - -const ( - // DefaultWriteTimeout is the default timeout for a complete write to succeed. - DefaultWriteTimeout = 5 * time.Second - - // DefaultShardWriterTimeout is the default timeout set on shard writers. - DefaultShardWriterTimeout = 5 * time.Second - - // DefaultShardMapperTimeout is the default timeout set on shard mappers. 
- DefaultShardMapperTimeout = 5 * time.Second -) - -// Config represents the configuration for the clustering service. -type Config struct { - ForceRemoteShardMapping bool `toml:"force-remote-mapping"` - WriteTimeout toml.Duration `toml:"write-timeout"` - ShardWriterTimeout toml.Duration `toml:"shard-writer-timeout"` - ShardMapperTimeout toml.Duration `toml:"shard-mapper-timeout"` -} - -// NewConfig returns an instance of Config with defaults. -func NewConfig() Config { - return Config{ - WriteTimeout: toml.Duration(DefaultWriteTimeout), - ShardWriterTimeout: toml.Duration(DefaultShardWriterTimeout), - ShardMapperTimeout: toml.Duration(DefaultShardMapperTimeout), - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config_test.go deleted file mode 100644 index db5e5ddc1..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/config_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package cluster_test - -import ( - "testing" - "time" - - "github.com/BurntSushi/toml" - "github.com/influxdb/influxdb/cluster" -) - -func TestConfig_Parse(t *testing.T) { - // Parse configuration. - var c cluster.Config - if _, err := toml.Decode(` -shard-writer-timeout = "10s" -write-timeout = "20s" -`, &c); err != nil { - t.Fatal(err) - } - - // Validate configuration. - if time.Duration(c.ShardWriterTimeout) != 10*time.Second { - t.Fatalf("unexpected shard-writer timeout: %s", c.ShardWriterTimeout) - } else if time.Duration(c.WriteTimeout) != 20*time.Second { - t.Fatalf("unexpected write timeout s: %s", c.WriteTimeout) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.pb.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.pb.go deleted file mode 100644 index 1d53707c8..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.pb.go +++ /dev/null @@ -1,286 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: internal/data.proto -// DO NOT EDIT! - -/* -Package internal is a generated protocol buffer package. - -It is generated from these files: - internal/data.proto - -It has these top-level messages: - WriteShardRequest - Field - Tag - Point - WriteShardResponse - MapShardRequest - MapShardResponse -*/ -package internal - -import proto "github.com/gogo/protobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = math.Inf - -type WriteShardRequest struct { - ShardID *uint64 `protobuf:"varint,1,req" json:"ShardID,omitempty"` - Points []*Point `protobuf:"bytes,2,rep" json:"Points,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *WriteShardRequest) Reset() { *m = WriteShardRequest{} } -func (m *WriteShardRequest) String() string { return proto.CompactTextString(m) } -func (*WriteShardRequest) ProtoMessage() {} - -func (m *WriteShardRequest) GetShardID() uint64 { - if m != nil && m.ShardID != nil { - return *m.ShardID - } - return 0 -} - -func (m *WriteShardRequest) GetPoints() []*Point { - if m != nil { - return m.Points - } - return nil -} - -type Field struct { - Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"` - Int32 *int32 `protobuf:"varint,2,opt" json:"Int32,omitempty"` - Int64 *int64 `protobuf:"varint,3,opt" json:"Int64,omitempty"` - Float64 *float64 `protobuf:"fixed64,4,opt" json:"Float64,omitempty"` - Bool *bool `protobuf:"varint,5,opt" json:"Bool,omitempty"` - String_ *string `protobuf:"bytes,6,opt" json:"String,omitempty"` - Bytes []byte `protobuf:"bytes,7,opt" json:"Bytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Field) Reset() { *m = Field{} } -func (m *Field) String() string { return proto.CompactTextString(m) } -func (*Field) ProtoMessage() {} - -func (m *Field) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Field) GetInt32() int32 { - if m != nil && m.Int32 != nil { - return *m.Int32 - } - return 0 -} - -func (m *Field) GetInt64() int64 { - if m != nil && m.Int64 != nil { - return *m.Int64 - } - return 0 -} - -func (m *Field) GetFloat64() float64 { - if m != nil && m.Float64 != nil { - return *m.Float64 - } - return 0 -} - -func (m *Field) GetBool() bool { - if m != nil && m.Bool != nil { - return *m.Bool - } - return false -} - -func (m *Field) GetString_() string { - if m != nil && m.String_ != nil { - return *m.String_ - } - return "" -} - -func (m *Field) GetBytes() []byte { - if m != nil { - return m.Bytes - } - return nil -} - -type Tag struct { - Key *string `protobuf:"bytes,1,req" json:"Key,omitempty"` - Value *string `protobuf:"bytes,2,req" json:"Value,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Tag) Reset() { *m = Tag{} } -func (m *Tag) String() string { return proto.CompactTextString(m) } -func (*Tag) ProtoMessage() {} - -func (m *Tag) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *Tag) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Point struct { - Name *string `protobuf:"bytes,1,req" json:"Name,omitempty"` - Time *int64 `protobuf:"varint,2,req" json:"Time,omitempty"` - Fields []*Field `protobuf:"bytes,3,rep" json:"Fields,omitempty"` - Tags []*Tag `protobuf:"bytes,4,rep" json:"Tags,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Point) Reset() { *m = Point{} } -func (m *Point) String() string { return proto.CompactTextString(m) } -func (*Point) ProtoMessage() {} - -func (m *Point) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Point) GetTime() int64 { - if m != nil && m.Time != nil { - return *m.Time - } - return 0 -} - -func (m *Point) GetFields() []*Field { - if m != nil { - return m.Fields - } - return nil -} - -func (m *Point) GetTags() []*Tag { - if m != nil { - return m.Tags - } - return nil -} - -type WriteShardResponse struct { - Code *int32 
`protobuf:"varint,1,req" json:"Code,omitempty"` - Message *string `protobuf:"bytes,2,opt" json:"Message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *WriteShardResponse) Reset() { *m = WriteShardResponse{} } -func (m *WriteShardResponse) String() string { return proto.CompactTextString(m) } -func (*WriteShardResponse) ProtoMessage() {} - -func (m *WriteShardResponse) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *WriteShardResponse) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -type MapShardRequest struct { - ShardID *uint64 `protobuf:"varint,1,req" json:"ShardID,omitempty"` - Query *string `protobuf:"bytes,2,req" json:"Query,omitempty"` - ChunkSize *int32 `protobuf:"varint,3,req" json:"ChunkSize,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MapShardRequest) Reset() { *m = MapShardRequest{} } -func (m *MapShardRequest) String() string { return proto.CompactTextString(m) } -func (*MapShardRequest) ProtoMessage() {} - -func (m *MapShardRequest) GetShardID() uint64 { - if m != nil && m.ShardID != nil { - return *m.ShardID - } - return 0 -} - -func (m *MapShardRequest) GetQuery() string { - if m != nil && m.Query != nil { - return *m.Query - } - return "" -} - -func (m *MapShardRequest) GetChunkSize() int32 { - if m != nil && m.ChunkSize != nil { - return *m.ChunkSize - } - return 0 -} - -type MapShardResponse struct { - Code *int32 `protobuf:"varint,1,req" json:"Code,omitempty"` - Message *string `protobuf:"bytes,2,opt" json:"Message,omitempty"` - Data []byte `protobuf:"bytes,3,opt" json:"Data,omitempty"` - TagSets []string `protobuf:"bytes,4,rep" json:"TagSets,omitempty"` - Fields []string `protobuf:"bytes,5,rep" json:"Fields,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MapShardResponse) Reset() { *m = MapShardResponse{} } -func (m *MapShardResponse) String() string { return proto.CompactTextString(m) } -func (*MapShardResponse) ProtoMessage() {} - -func (m *MapShardResponse) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *MapShardResponse) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -func (m *MapShardResponse) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *MapShardResponse) GetTagSets() []string { - if m != nil { - return m.TagSets - } - return nil -} - -func (m *MapShardResponse) GetFields() []string { - if m != nil { - return m.Fields - } - return nil -} - -func init() { -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.proto b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.proto deleted file mode 100644 index e38a9de5b..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/internal/data.proto +++ /dev/null @@ -1,49 +0,0 @@ -package internal; - -message WriteShardRequest { - required uint64 ShardID = 1; - repeated Point Points = 2; -} - -message Field { - required string Name = 1; - oneof Value { - int32 Int32 = 2; - int64 Int64 = 3; - double Float64 = 4; - bool Bool = 5; - string String = 6; - bytes Bytes = 7; - } -} - -message Tag { - required string Key = 1; - required string Value = 2; -} - -message Point { - required string Name = 1; - required int64 Time = 2; - repeated Field Fields = 3; - repeated Tag Tags = 4; -} - -message WriteShardResponse { - required int32 Code = 1; - optional string Message = 2; 
-} - -message MapShardRequest { - required uint64 ShardID = 1; - required string Query = 2; - required int32 ChunkSize = 3; -} - -message MapShardResponse { - required int32 Code = 1; - optional string Message = 2; - optional bytes Data = 3; - repeated string TagSets = 4; - repeated string Fields = 5; -} \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer.go deleted file mode 100644 index b5a78636d..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer.go +++ /dev/null @@ -1,314 +0,0 @@ -package cluster - -import ( - "errors" - "fmt" - "log" - "os" - "strings" - "sync" - "time" - - "github.com/influxdb/influxdb" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/tsdb" -) - -// ConsistencyLevel represents the replication criteria required before a write can -// be returned as successful -type ConsistencyLevel int - -const ( - // ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet - ConsistencyLevelAny ConsistencyLevel = iota - - // ConsistencyLevelOne requires at least one data node to acknowledge a write - ConsistencyLevelOne - - // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write - ConsistencyLevelQuorum - - // ConsistencyLevelAll requires all data nodes to acknowledge a write - ConsistencyLevelAll -) - -var ( - // ErrTimeout is returned when a write times out. - ErrTimeout = errors.New("timeout") - - // ErrPartialWrite is returned when a write partially succeeds but does - // not meet the requested consistency level. - ErrPartialWrite = errors.New("partial write") - - // ErrWriteFailed is returned when no writes succeeded. - ErrWriteFailed = errors.New("write failed") - - // ErrInvalidConsistencyLevel is returned when parsing the string version - // of a consistency level. - ErrInvalidConsistencyLevel = errors.New("invalid consistency level") -) - -func ParseConsistencyLevel(level string) (ConsistencyLevel, error) { - switch strings.ToLower(level) { - case "any": - return ConsistencyLevelAny, nil - case "one": - return ConsistencyLevelOne, nil - case "quorum": - return ConsistencyLevelQuorum, nil - case "all": - return ConsistencyLevelAll, nil - default: - return 0, ErrInvalidConsistencyLevel - } -} - -// PointsWriter handles writes across multiple local and remote data nodes. -type PointsWriter struct { - mu sync.RWMutex - closing chan struct{} - WriteTimeout time.Duration - Logger *log.Logger - - MetaStore interface { - NodeID() uint64 - Database(name string) (di *meta.DatabaseInfo, err error) - RetentionPolicy(database, policy string) (*meta.RetentionPolicyInfo, error) - CreateShardGroupIfNotExists(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) - ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) - } - - TSDBStore interface { - CreateShard(database, retentionPolicy string, shardID uint64) error - WriteToShard(shardID uint64, points []tsdb.Point) error - } - - ShardWriter interface { - WriteShard(shardID, ownerID uint64, points []tsdb.Point) error - } - - HintedHandoff interface { - WriteShard(shardID, ownerID uint64, points []tsdb.Point) error - } -} - -// NewPointsWriter returns a new instance of PointsWriter for a node. 
-func NewPointsWriter() *PointsWriter { - return &PointsWriter{ - closing: make(chan struct{}), - WriteTimeout: DefaultWriteTimeout, - Logger: log.New(os.Stderr, "[write] ", log.LstdFlags), - } -} - -// ShardMapping contains a mapping of shards to points. -type ShardMapping struct { - Points map[uint64][]tsdb.Point // The points associated with a shard ID - Shards map[uint64]*meta.ShardInfo // The shards that have been mapped, keyed by shard ID -} - -// NewShardMapping creates an empty ShardMapping -func NewShardMapping() *ShardMapping { - return &ShardMapping{ - Points: map[uint64][]tsdb.Point{}, - Shards: map[uint64]*meta.ShardInfo{}, - } -} - -// MapPoint maps a point to a shard -func (s *ShardMapping) MapPoint(shardInfo *meta.ShardInfo, p tsdb.Point) { - points, ok := s.Points[shardInfo.ID] - if !ok { - s.Points[shardInfo.ID] = []tsdb.Point{p} - } else { - s.Points[shardInfo.ID] = append(points, p) - } - s.Shards[shardInfo.ID] = shardInfo -} - -func (w *PointsWriter) Open() error { - w.mu.Lock() - defer w.mu.Unlock() - if w.closing == nil { - w.closing = make(chan struct{}) - } - return nil -} - -func (w *PointsWriter) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - if w.closing != nil { - close(w.closing) - w.closing = nil - } - return nil -} - -// MapShards maps the points contained in wp to a ShardMapping. If a point -// maps to a shard group or shard that does not currently exist, it will be -// created before returning the mapping. -func (w *PointsWriter) MapShards(wp *WritePointsRequest) (*ShardMapping, error) { - - // holds the start time ranges for required shard groups - timeRanges := map[time.Time]*meta.ShardGroupInfo{} - - rp, err := w.MetaStore.RetentionPolicy(wp.Database, wp.RetentionPolicy) - if err != nil { - return nil, err - } - - for _, p := range wp.Points { - timeRanges[p.Time().Truncate(rp.ShardGroupDuration)] = nil - } - - // holds all the shard groups and shards that are required for writes - for t := range timeRanges { - sg, err := w.MetaStore.CreateShardGroupIfNotExists(wp.Database, wp.RetentionPolicy, t) - if err != nil { - return nil, err - } - timeRanges[t] = sg - } - - mapping := NewShardMapping() - for _, p := range wp.Points { - sg := timeRanges[p.Time().Truncate(rp.ShardGroupDuration)] - sh := sg.ShardFor(p.HashID()) - mapping.MapPoint(&sh, p) - } - return mapping, nil -} - -// WritePoints writes across multiple local and remote data nodes according to the consistency level. -func (w *PointsWriter) WritePoints(p *WritePointsRequest) error { - if p.RetentionPolicy == "" { - db, err := w.MetaStore.Database(p.Database) - if err != nil { - return err - } else if db == nil { - return influxdb.ErrDatabaseNotFound(p.Database) - } - p.RetentionPolicy = db.DefaultRetentionPolicy - } - - shardMappings, err := w.MapShards(p) - if err != nil { - return err - } - - // Write each shard in its own goroutine and return as soon - // as one fails. - ch := make(chan error, len(shardMappings.Points)) - for shardID, points := range shardMappings.Points { - go func(shard *meta.ShardInfo, database, retentionPolicy string, points []tsdb.Point) { - ch <- w.writeToShard(shard, p.Database, p.RetentionPolicy, p.ConsistencyLevel, points) - }(shardMappings.Shards[shardID], p.Database, p.RetentionPolicy, points) - } - - for range shardMappings.Points { - select { - case <-w.closing: - return ErrWriteFailed - case err := <-ch: - if err != nil { - return err - } - } - } - return nil -} - -// writeToShard writes points to a shard and ensures a write consistency level has been met. 
-// writeToShard writes points to a shard and ensures a write consistency level has been met. If the write
-// partially succeeds, ErrPartialWrite is returned.
-func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPolicy string,
-	consistency ConsistencyLevel, points []tsdb.Point) error {
-	// The required number of writes to achieve the requested consistency level
-	required := len(shard.OwnerIDs)
-	switch consistency {
-	case ConsistencyLevelAny, ConsistencyLevelOne:
-		required = 1
-	case ConsistencyLevelQuorum:
-		required = required/2 + 1
-	}
-
-	// response channel for each shard writer goroutine
-	ch := make(chan error, len(shard.OwnerIDs))
-
-	for _, nodeID := range shard.OwnerIDs {
-		go func(shardID, nodeID uint64, points []tsdb.Point) {
-			if w.MetaStore.NodeID() == nodeID {
-				err := w.TSDBStore.WriteToShard(shardID, points)
-				// If we've written to a shard that should exist on the current node, but the store has
-				// not actually created this shard, tell it to create it and retry the write
-				if err == tsdb.ErrShardNotFound {
-					err = w.TSDBStore.CreateShard(database, retentionPolicy, shardID)
-					if err != nil {
-						ch <- err
-						return
-					}
-					err = w.TSDBStore.WriteToShard(shardID, points)
-				}
-				ch <- err
-				return
-			}
-
-			err := w.ShardWriter.WriteShard(shardID, nodeID, points)
-			if err != nil && tsdb.IsRetryable(err) {
-				// The remote write failed so queue it via hinted handoff
-				hherr := w.HintedHandoff.WriteShard(shardID, nodeID, points)
-
-				// If the write consistency level is ANY, then a successful hinted handoff can
-				// be considered a successful write so send nil to the response channel;
-				// otherwise, let the original error propagate to the response channel
-				if hherr == nil && consistency == ConsistencyLevelAny {
-					ch <- nil
-					return
-				}
-			}
-			ch <- err
-
-		}(shard.ID, nodeID, points)
-	}
-
-	var wrote int
-	timeout := time.After(w.WriteTimeout)
-	var writeError error
-	for _, nodeID := range shard.OwnerIDs {
-		select {
-		case <-w.closing:
-			return ErrWriteFailed
-		case <-timeout:
-			// return timeout error to caller
-			return ErrTimeout
-		case err := <-ch:
-			// If the write returned an error, continue to the next response
-			if err != nil {
-				w.Logger.Printf("write failed for shard %d on node %d: %v", shard.ID, nodeID, err)
-
-				// Keep track of the first error we see to return back to the client
-				if writeError == nil {
-					writeError = err
-				}
-				continue
-			}
-
-			wrote += 1
-		}
-	}
-
-	// We wrote the required consistency level
-	if wrote >= required {
-		return nil
-	}
-
-	if wrote > 0 {
-		return ErrPartialWrite
-	}
-
-	if writeError != nil {
-		return fmt.Errorf("write failed: %v", writeError)
-	}
-
-	return ErrWriteFailed
-}
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go
deleted file mode 100644
index 321f731d0..000000000
--- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/points_writer_test.go
+++ /dev/null
@@ -1,436 +0,0 @@
-package cluster_test
-
-import (
-	"fmt"
-	"sync"
-	"sync/atomic"
-	"testing"
-	"time"
-
-	"github.com/influxdb/influxdb/cluster"
-	"github.com/influxdb/influxdb/meta"
-	"github.com/influxdb/influxdb/tsdb"
-)
-
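// The consistency switch in writeToShard above reduces each level to a
// required number of acknowledgements out of N shard owners. A small sketch
// of that arithmetic; the level names mirror this package, requiredAcks is a
// hypothetical helper:
package main

import "fmt"

type ConsistencyLevel int

const (
	ConsistencyLevelAny ConsistencyLevel = iota
	ConsistencyLevelOne
	ConsistencyLevelQuorum
	ConsistencyLevelAll
)

// requiredAcks returns how many of n replica writes must succeed.
func requiredAcks(level ConsistencyLevel, n int) int {
	switch level {
	case ConsistencyLevelAny, ConsistencyLevelOne:
		return 1 // a single ack (or, for ANY, a hinted handoff) suffices
	case ConsistencyLevelQuorum:
		return n/2 + 1 // strict majority: 2 of 3, 3 of 5, ...
	default: // ConsistencyLevelAll
		return n
	}
}

func main() {
	for _, l := range []ConsistencyLevel{ConsistencyLevelAny, ConsistencyLevelOne, ConsistencyLevelQuorum, ConsistencyLevelAll} {
		fmt.Printf("level %d needs %d of 3 acks\n", l, requiredAcks(l, 3))
	}
}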
-// Ensures the points writer maps a single point to a single shard.
-func TestPointsWriter_MapShards_One(t *testing.T) {
-	ms := MetaStore{}
-	rp := NewRetentionPolicy("myp", time.Hour, 3)
-
-	ms.NodeIDFn = func() uint64 { return 1 }
-	ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) {
-		return rp, nil
-	}
-
-	ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {
-		return &rp.ShardGroups[0], nil
-	}
-
-	c := cluster.PointsWriter{MetaStore: ms}
-	pr := &cluster.WritePointsRequest{
-		Database:         "mydb",
-		RetentionPolicy:  "myrp",
-		ConsistencyLevel: cluster.ConsistencyLevelOne,
-	}
-	pr.AddPoint("cpu", 1.0, time.Now(), nil)
-
-	var (
-		shardMappings *cluster.ShardMapping
-		err           error
-	)
-	if shardMappings, err = c.MapShards(pr); err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-
-	if exp := 1; len(shardMappings.Points) != exp {
-		t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp)
-	}
-}
-
-// Ensures the points writer maps multiple points across shard group boundaries.
-func TestPointsWriter_MapShards_Multiple(t *testing.T) {
-	ms := MetaStore{}
-	rp := NewRetentionPolicy("myp", time.Hour, 3)
-	AttachShardGroupInfo(rp, []uint64{1, 2, 3})
-	AttachShardGroupInfo(rp, []uint64{1, 2, 3})
-
-	ms.NodeIDFn = func() uint64 { return 1 }
-	ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) {
-		return rp, nil
-	}
-
-	ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) {
-		for i, sg := range rp.ShardGroups {
-			if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) {
-				return &rp.ShardGroups[i], nil
-			}
-		}
-		panic("should not get here")
-	}
-
-	c := cluster.PointsWriter{MetaStore: ms}
-	pr := &cluster.WritePointsRequest{
-		Database:         "mydb",
-		RetentionPolicy:  "myrp",
-		ConsistencyLevel: cluster.ConsistencyLevelOne,
-	}
-
-	// Three points that range over the shardGroup duration (1h) and should map to two
-	// distinct shards
-	pr.AddPoint("cpu", 1.0, time.Unix(0, 0), nil)
-	pr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil)
-	pr.AddPoint("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil)
-
-	var (
-		shardMappings *cluster.ShardMapping
-		err           error
-	)
-	if shardMappings, err = c.MapShards(pr); err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-
-	if exp := 2; len(shardMappings.Points) != exp {
-		t.Errorf("MapShards() len mismatch. got %v, exp %v", len(shardMappings.Points), exp)
-	}
-
-	for _, points := range shardMappings.Points {
-		// First shard should have 1 point w/ first point added
-		if len(points) == 1 && points[0].Time() != pr.Points[0].Time() {
-			t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[0].Time())
-		}
-
-		// Second shard should have the last two points added
-		if len(points) == 2 && points[0].Time() != pr.Points[1].Time() {
-			t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[0].Time(), pr.Points[1].Time())
-		}
-
-		if len(points) == 2 && points[1].Time() != pr.Points[2].Time() {
-			t.Fatalf("MapShards() value mismatch. got %v, exp %v", points[1].Time(), pr.Points[2].Time())
-		}
-	}
-}
-
-func TestPointsWriter_WritePoints(t *testing.T) {
-	tests := []struct {
-		name            string
-		database        string
-		retentionPolicy string
-		consistency     cluster.ConsistencyLevel
-
-		// the responses returned by each shard write call.
node ID 1 = pos 0 - err []error - expErr error - }{ - // Consistency one - { - name: "write one success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelOne, - err: []error{nil, nil, nil}, - expErr: nil, - }, - { - name: "write one error", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelOne, - err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, - expErr: fmt.Errorf("write failed: a failure"), - }, - - // Consistency any - { - name: "write any success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAny, - err: []error{fmt.Errorf("a failure"), nil, fmt.Errorf("a failure")}, - expErr: nil, - }, - // Consistency all - { - name: "write all success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAll, - err: []error{nil, nil, nil}, - expErr: nil, - }, - { - name: "write all, 2/3, partial write", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAll, - err: []error{nil, fmt.Errorf("a failure"), nil}, - expErr: cluster.ErrPartialWrite, - }, - { - name: "write all, 1/3 (failure)", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAll, - err: []error{nil, fmt.Errorf("a failure"), fmt.Errorf("a failure")}, - expErr: cluster.ErrPartialWrite, - }, - - // Consistency quorum - { - name: "write quorum, 1/3 failure", - consistency: cluster.ConsistencyLevelQuorum, - database: "mydb", - retentionPolicy: "myrp", - err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), nil}, - expErr: cluster.ErrPartialWrite, - }, - { - name: "write quorum, 2/3 success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelQuorum, - err: []error{nil, nil, fmt.Errorf("a failure")}, - expErr: nil, - }, - { - name: "write quorum, 3/3 success", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelQuorum, - err: []error{nil, nil, nil}, - expErr: nil, - }, - - // Error write error - { - name: "no writes succeed", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelOne, - err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, - expErr: fmt.Errorf("write failed: a failure"), - }, - - // Hinted handoff w/ ANY - { - name: "hinted handoff write succeed", - database: "mydb", - retentionPolicy: "myrp", - consistency: cluster.ConsistencyLevelAny, - err: []error{fmt.Errorf("a failure"), fmt.Errorf("a failure"), fmt.Errorf("a failure")}, - expErr: nil, - }, - - // Write to non-existant database - { - name: "write to non-existant database", - database: "doesnt_exist", - retentionPolicy: "", - consistency: cluster.ConsistencyLevelAny, - err: []error{nil, nil, nil}, - expErr: fmt.Errorf("database not found: doesnt_exist"), - }, - } - - for _, test := range tests { - - pr := &cluster.WritePointsRequest{ - Database: test.database, - RetentionPolicy: test.retentionPolicy, - ConsistencyLevel: test.consistency, - } - - // Three points that range over the shardGroup duration (1h) and should map to two - // distinct shards - pr.AddPoint("cpu", 1.0, time.Unix(0, 0), nil) - pr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil) - pr.AddPoint("cpu", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil) - - // copy to prevent data race - theTest := test - sm := cluster.NewShardMapping() - sm.MapPoint( - &meta.ShardInfo{ID: uint64(1), OwnerIDs: 
[]uint64{uint64(1), uint64(2), uint64(3)}}, - pr.Points[0]) - sm.MapPoint( - &meta.ShardInfo{ID: uint64(2), OwnerIDs: []uint64{uint64(1), uint64(2), uint64(3)}}, - pr.Points[1]) - sm.MapPoint( - &meta.ShardInfo{ID: uint64(2), OwnerIDs: []uint64{uint64(1), uint64(2), uint64(3)}}, - pr.Points[2]) - - // Local cluster.Node ShardWriter - // lock on the write increment since these functions get called in parallel - var mu sync.Mutex - sw := &fakeShardWriter{ - ShardWriteFn: func(shardID, nodeID uint64, points []tsdb.Point) error { - mu.Lock() - defer mu.Unlock() - return theTest.err[int(nodeID)-1] - }, - } - - store := &fakeStore{ - WriteFn: func(shardID uint64, points []tsdb.Point) error { - mu.Lock() - defer mu.Unlock() - return theTest.err[0] - }, - } - - hh := &fakeShardWriter{ - ShardWriteFn: func(shardID, nodeID uint64, points []tsdb.Point) error { - return nil - }, - } - - ms := NewMetaStore() - ms.DatabaseFn = func(database string) (*meta.DatabaseInfo, error) { - return nil, nil - } - ms.NodeIDFn = func() uint64 { return 1 } - c := cluster.NewPointsWriter() - c.MetaStore = ms - c.ShardWriter = sw - c.TSDBStore = store - c.HintedHandoff = hh - - err := c.WritePoints(pr) - if err == nil && test.expErr != nil { - t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) - } - - if err != nil && test.expErr == nil { - t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) - } - if err != nil && test.expErr != nil && err.Error() != test.expErr.Error() { - t.Errorf("PointsWriter.WritePoints(): '%s' error: got %v, exp %v", test.name, err, test.expErr) - } - } -} - -var shardID uint64 - -type fakeShardWriter struct { - ShardWriteFn func(shardID, nodeID uint64, points []tsdb.Point) error -} - -func (f *fakeShardWriter) WriteShard(shardID, nodeID uint64, points []tsdb.Point) error { - return f.ShardWriteFn(shardID, nodeID, points) -} - -type fakeStore struct { - WriteFn func(shardID uint64, points []tsdb.Point) error - CreateShardfn func(database, retentionPolicy string, shardID uint64) error -} - -func (f *fakeStore) WriteToShard(shardID uint64, points []tsdb.Point) error { - return f.WriteFn(shardID, points) -} - -func (f *fakeStore) CreateShard(database, retentionPolicy string, shardID uint64) error { - return f.CreateShardfn(database, retentionPolicy, shardID) -} - -func NewMetaStore() *MetaStore { - ms := &MetaStore{} - rp := NewRetentionPolicy("myp", time.Hour, 3) - AttachShardGroupInfo(rp, []uint64{1, 2, 3}) - AttachShardGroupInfo(rp, []uint64{1, 2, 3}) - - ms.RetentionPolicyFn = func(db, retentionPolicy string) (*meta.RetentionPolicyInfo, error) { - return rp, nil - } - - ms.CreateShardGroupIfNotExistsFn = func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - for i, sg := range rp.ShardGroups { - if timestamp.Equal(sg.StartTime) || timestamp.After(sg.StartTime) && timestamp.Before(sg.EndTime) { - return &rp.ShardGroups[i], nil - } - } - panic("should not get here") - } - return ms -} - -type MetaStore struct { - NodeIDFn func() uint64 - RetentionPolicyFn func(database, name string) (*meta.RetentionPolicyInfo, error) - CreateShardGroupIfNotExistsFn func(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) - DatabaseFn func(database string) (*meta.DatabaseInfo, error) - ShardOwnerFn func(shardID uint64) (string, string, *meta.ShardGroupInfo) -} - -func (m MetaStore) NodeID() uint64 { return m.NodeIDFn() } - -func (m MetaStore) RetentionPolicy(database, name 
string) (*meta.RetentionPolicyInfo, error) { - return m.RetentionPolicyFn(database, name) -} - -func (m MetaStore) CreateShardGroupIfNotExists(database, policy string, timestamp time.Time) (*meta.ShardGroupInfo, error) { - return m.CreateShardGroupIfNotExistsFn(database, policy, timestamp) -} - -func (m MetaStore) Database(database string) (*meta.DatabaseInfo, error) { - return m.DatabaseFn(database) -} - -func (m MetaStore) ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) { - return m.ShardOwnerFn(shardID) -} - -func NewRetentionPolicy(name string, duration time.Duration, nodeCount int) *meta.RetentionPolicyInfo { - shards := []meta.ShardInfo{} - ownerIDs := []uint64{} - for i := 1; i <= nodeCount; i++ { - ownerIDs = append(ownerIDs, uint64(i)) - } - - // each node is fully replicated with each other - shards = append(shards, meta.ShardInfo{ - ID: nextShardID(), - OwnerIDs: ownerIDs, - }) - - rp := &meta.RetentionPolicyInfo{ - Name: "myrp", - ReplicaN: nodeCount, - Duration: duration, - ShardGroupDuration: duration, - ShardGroups: []meta.ShardGroupInfo{ - meta.ShardGroupInfo{ - ID: nextShardID(), - StartTime: time.Unix(0, 0), - EndTime: time.Unix(0, 0).Add(duration).Add(-1), - Shards: shards, - }, - }, - } - return rp -} - -func AttachShardGroupInfo(rp *meta.RetentionPolicyInfo, ownerIDs []uint64) { - var startTime, endTime time.Time - if len(rp.ShardGroups) == 0 { - startTime = time.Unix(0, 0) - } else { - startTime = rp.ShardGroups[len(rp.ShardGroups)-1].StartTime.Add(rp.ShardGroupDuration) - } - endTime = startTime.Add(rp.ShardGroupDuration).Add(-1) - - sh := meta.ShardGroupInfo{ - ID: uint64(len(rp.ShardGroups) + 1), - StartTime: startTime, - EndTime: endTime, - Shards: []meta.ShardInfo{ - meta.ShardInfo{ - ID: nextShardID(), - OwnerIDs: ownerIDs, - }, - }, - } - rp.ShardGroups = append(rp.ShardGroups, sh) -} - -func nextShardID() uint64 { - return atomic.AddUint64(&shardID, 1) -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc.go deleted file mode 100644 index f3eca790c..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc.go +++ /dev/null @@ -1,229 +0,0 @@ -package cluster - -import ( - "time" - - "github.com/gogo/protobuf/proto" - "github.com/influxdb/influxdb/cluster/internal" - "github.com/influxdb/influxdb/tsdb" -) - -//go:generate protoc --gogo_out=. internal/data.proto - -// MapShardRequest represents the request to map a remote shard for a query. -type MapShardRequest struct { - pb internal.MapShardRequest -} - -func (m *MapShardRequest) ShardID() uint64 { return m.pb.GetShardID() } -func (m *MapShardRequest) Query() string { return m.pb.GetQuery() } -func (m *MapShardRequest) ChunkSize() int32 { return m.pb.GetChunkSize() } - -func (m *MapShardRequest) SetShardID(id uint64) { m.pb.ShardID = &id } -func (m *MapShardRequest) SetQuery(query string) { m.pb.Query = &query } -func (m *MapShardRequest) SetChunkSize(chunkSize int32) { m.pb.ChunkSize = &chunkSize } - -// MarshalBinary encodes the object to a binary format. -func (m *MapShardRequest) MarshalBinary() ([]byte, error) { - return proto.Marshal(&m.pb) -} - -// UnmarshalBinary populates MapShardRequest from a binary format. 
-func (m *MapShardRequest) UnmarshalBinary(buf []byte) error { - if err := proto.Unmarshal(buf, &m.pb); err != nil { - return err - } - return nil -} - -// MapShardResponse represents the response returned from a remote MapShardRequest call -type MapShardResponse struct { - pb internal.MapShardResponse -} - -func NewMapShardResponse(code int, message string) *MapShardResponse { - m := &MapShardResponse{} - m.SetCode(code) - m.SetMessage(message) - return m -} - -func (r *MapShardResponse) Code() int { return int(r.pb.GetCode()) } -func (r *MapShardResponse) Message() string { return r.pb.GetMessage() } -func (r *MapShardResponse) TagSets() []string { return r.pb.GetTagSets() } -func (r *MapShardResponse) Fields() []string { return r.pb.GetFields() } -func (r *MapShardResponse) Data() []byte { return r.pb.GetData() } - -func (r *MapShardResponse) SetCode(code int) { r.pb.Code = proto.Int32(int32(code)) } -func (r *MapShardResponse) SetMessage(message string) { r.pb.Message = &message } -func (r *MapShardResponse) SetTagSets(tagsets []string) { r.pb.TagSets = tagsets } -func (r *MapShardResponse) SetFields(fields []string) { r.pb.Fields = fields } -func (r *MapShardResponse) SetData(data []byte) { r.pb.Data = data } - -// MarshalBinary encodes the object to a binary format. -func (r *MapShardResponse) MarshalBinary() ([]byte, error) { - return proto.Marshal(&r.pb) -} - -// UnmarshalBinary populates WritePointRequest from a binary format. -func (r *MapShardResponse) UnmarshalBinary(buf []byte) error { - if err := proto.Unmarshal(buf, &r.pb); err != nil { - return err - } - return nil -} - -// WritePointsRequest represents a request to write point data to the cluster -type WritePointsRequest struct { - Database string - RetentionPolicy string - ConsistencyLevel ConsistencyLevel - Points []tsdb.Point -} - -// AddPoint adds a point to the WritePointRequest with field name 'value' -func (w *WritePointsRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) { - w.Points = append(w.Points, tsdb.NewPoint( - name, tags, map[string]interface{}{"value": value}, timestamp, - )) -} - -// WriteShardRequest represents the a request to write a slice of points to a shard -type WriteShardRequest struct { - pb internal.WriteShardRequest -} - -// WriteShardResponse represents the response returned from a remote WriteShardRequest call -type WriteShardResponse struct { - pb internal.WriteShardResponse -} - -func (w *WriteShardRequest) SetShardID(id uint64) { w.pb.ShardID = &id } -func (w *WriteShardRequest) ShardID() uint64 { return w.pb.GetShardID() } - -func (w *WriteShardRequest) Points() []tsdb.Point { return w.unmarshalPoints() } - -func (w *WriteShardRequest) AddPoint(name string, value interface{}, timestamp time.Time, tags map[string]string) { - w.AddPoints([]tsdb.Point{tsdb.NewPoint( - name, tags, map[string]interface{}{"value": value}, timestamp, - )}) -} - -func (w *WriteShardRequest) AddPoints(points []tsdb.Point) { - w.pb.Points = append(w.pb.Points, w.marshalPoints(points)...) -} - -// MarshalBinary encodes the object to a binary format. 
-func (w *WriteShardRequest) MarshalBinary() ([]byte, error) { - return proto.Marshal(&w.pb) -} - -func (w *WriteShardRequest) marshalPoints(points []tsdb.Point) []*internal.Point { - pts := make([]*internal.Point, len(points)) - for i, p := range points { - fields := []*internal.Field{} - for k, v := range p.Fields() { - name := k - f := &internal.Field{ - Name: &name, - } - switch t := v.(type) { - case int: - f.Int64 = proto.Int64(int64(t)) - case int32: - f.Int32 = proto.Int32(t) - case int64: - f.Int64 = proto.Int64(t) - case float64: - f.Float64 = proto.Float64(t) - case bool: - f.Bool = proto.Bool(t) - case string: - f.String_ = proto.String(t) - case []byte: - f.Bytes = t - } - fields = append(fields, f) - } - - tags := []*internal.Tag{} - for k, v := range p.Tags() { - key := k - value := v - tags = append(tags, &internal.Tag{ - Key: &key, - Value: &value, - }) - } - name := p.Name() - pts[i] = &internal.Point{ - Name: &name, - Time: proto.Int64(p.Time().UnixNano()), - Fields: fields, - Tags: tags, - } - - } - return pts -} - -// UnmarshalBinary populates WritePointRequest from a binary format. -func (w *WriteShardRequest) UnmarshalBinary(buf []byte) error { - if err := proto.Unmarshal(buf, &w.pb); err != nil { - return err - } - return nil -} - -func (w *WriteShardRequest) unmarshalPoints() []tsdb.Point { - points := make([]tsdb.Point, len(w.pb.GetPoints())) - for i, p := range w.pb.GetPoints() { - pt := tsdb.NewPoint( - p.GetName(), map[string]string{}, - map[string]interface{}{}, time.Unix(0, p.GetTime())) - - for _, f := range p.GetFields() { - n := f.GetName() - if f.Int32 != nil { - pt.AddField(n, f.GetInt32()) - } else if f.Int64 != nil { - pt.AddField(n, f.GetInt64()) - } else if f.Float64 != nil { - pt.AddField(n, f.GetFloat64()) - } else if f.Bool != nil { - pt.AddField(n, f.GetBool()) - } else if f.String_ != nil { - pt.AddField(n, f.GetString_()) - } else { - pt.AddField(n, f.GetBytes()) - } - } - - tags := tsdb.Tags{} - for _, t := range p.GetTags() { - tags[t.GetKey()] = t.GetValue() - } - pt.SetTags(tags) - points[i] = pt - } - return points -} - -func (w *WriteShardResponse) SetCode(code int) { w.pb.Code = proto.Int32(int32(code)) } -func (w *WriteShardResponse) SetMessage(message string) { w.pb.Message = &message } - -func (w *WriteShardResponse) Code() int { return int(w.pb.GetCode()) } -func (w *WriteShardResponse) Message() string { return w.pb.GetMessage() } - -// MarshalBinary encodes the object to a binary format. -func (w *WriteShardResponse) MarshalBinary() ([]byte, error) { - return proto.Marshal(&w.pb) -} - -// UnmarshalBinary populates WritePointRequest from a binary format. 
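// marshalPoints above uses a type switch to route each dynamic field value
// into the matching typed slot of the wire message. The same idea in
// isolation; Field here is a plain stand-in struct, not the internal.Field
// protobuf message, and encode is a hypothetical helper:
package main

import "fmt"

type Field struct {
	Name    string
	Int64   *int64
	Float64 *float64
	Bool    *bool
	Str     *string
}

// encode places v into the one pointer slot that matches its concrete type.
func encode(name string, v interface{}) Field {
	f := Field{Name: name}
	switch t := v.(type) {
	case int:
		n := int64(t)
		f.Int64 = &n // widen plain ints to int64, as marshalPoints does
	case int64:
		f.Int64 = &t
	case float64:
		f.Float64 = &t
	case bool:
		f.Bool = &t
	case string:
		f.Str = &t
	}
	return f
}

func main() {
	fmt.Printf("%+v\n", encode("value", 42))
	fmt.Printf("%+v\n", encode("value", 0.5))
}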
-func (w *WriteShardResponse) UnmarshalBinary(buf []byte) error { - if err := proto.Unmarshal(buf, &w.pb); err != nil { - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc_test.go deleted file mode 100644 index 4e42cd5d6..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/rpc_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package cluster - -import ( - "testing" - "time" -) - -func TestWriteShardRequestBinary(t *testing.T) { - sr := &WriteShardRequest{} - - sr.SetShardID(uint64(1)) - if exp := uint64(1); sr.ShardID() != exp { - t.Fatalf("ShardID mismatch: got %v, exp %v", sr.ShardID(), exp) - } - - sr.AddPoint("cpu", 1.0, time.Unix(0, 0), map[string]string{"host": "serverA"}) - sr.AddPoint("cpu", 2.0, time.Unix(0, 0).Add(time.Hour), nil) - sr.AddPoint("cpu_load", 3.0, time.Unix(0, 0).Add(time.Hour+time.Second), nil) - - b, err := sr.MarshalBinary() - if err != nil { - t.Fatalf("WritePointsRequest.MarshalBinary() failed: %v", err) - } - if len(b) == 0 { - t.Fatalf("WritePointsRequest.MarshalBinary() returned 0 bytes") - } - - got := &WriteShardRequest{} - if err := got.UnmarshalBinary(b); err != nil { - t.Fatalf("WritePointsRequest.UnmarshalMarshalBinary() failed: %v", err) - } - - if got.ShardID() != sr.ShardID() { - t.Errorf("ShardID mismatch: got %v, exp %v", got.ShardID(), sr.ShardID()) - } - - if len(got.Points()) != len(sr.Points()) { - t.Errorf("Points count mismatch: got %v, exp %v", len(got.Points()), len(sr.Points())) - } - - srPoints := sr.Points() - gotPoints := got.Points() - for i, p := range srPoints { - g := gotPoints[i] - - if g.Name() != p.Name() { - t.Errorf("Point %d name mismatch: got %v, exp %v", i, g.Name(), p.Name()) - } - - if !g.Time().Equal(p.Time()) { - t.Errorf("Point %d time mismatch: got %v, exp %v", i, g.Time(), p.Time()) - } - - if g.HashID() != p.HashID() { - t.Errorf("Point #%d HashID() mismatch: got %v, exp %v", i, g.HashID(), p.HashID()) - } - - for k, v := range p.Tags() { - if g.Tags()[k] != v { - t.Errorf("Point #%d tag mismatch: got %v, exp %v", i, k, v) - } - } - - if len(p.Fields()) != len(g.Fields()) { - t.Errorf("Point %d field count mismatch: got %v, exp %v", i, len(g.Fields()), len(p.Fields())) - } - - for j, f := range p.Fields() { - if g.Fields()[j] != f { - t.Errorf("Point %d field mismatch: got %v, exp %v", i, g.Fields()[j], f) - } - } - } -} - -func TestWriteShardResponseBinary(t *testing.T) { - sr := &WriteShardResponse{} - sr.SetCode(10) - sr.SetMessage("foo") - b, err := sr.MarshalBinary() - - if exp := 10; sr.Code() != exp { - t.Fatalf("Code mismatch: got %v, exp %v", sr.Code(), exp) - } - - if exp := "foo"; sr.Message() != exp { - t.Fatalf("Message mismatch: got %v, exp %v", sr.Message(), exp) - } - - if err != nil { - t.Fatalf("WritePointsResponse.MarshalBinary() failed: %v", err) - } - if len(b) == 0 { - t.Fatalf("WritePointsResponse.MarshalBinary() returned 0 bytes") - } - - got := &WriteShardResponse{} - if err := got.UnmarshalBinary(b); err != nil { - t.Fatalf("WritePointsResponse.UnmarshalMarshalBinary() failed: %v", err) - } - - if got.Code() != sr.Code() { - t.Errorf("Code mismatch: got %v, exp %v", got.Code(), sr.Code()) - } - - if got.Message() != sr.Message() { - t.Errorf("Message mismatch: got %v, exp %v", got.Message(), sr.Message()) - } - -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service.go 
b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service.go deleted file mode 100644 index 896b0ce1e..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service.go +++ /dev/null @@ -1,338 +0,0 @@ -package cluster - -import ( - "encoding/binary" - "encoding/json" - "fmt" - "io" - "log" - "net" - "os" - "strings" - "sync" - - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/tsdb" -) - -// MaxMessageSize defines how large a message can be before we reject it -const MaxMessageSize = 1024 * 1024 * 1024 // 1GB - -// MuxHeader is the header byte used in the TCP mux. -const MuxHeader = 2 - -// Service processes data received over raw TCP connections. -type Service struct { - mu sync.RWMutex - - wg sync.WaitGroup - closing chan struct{} - - Listener net.Listener - - MetaStore interface { - ShardOwner(shardID uint64) (string, string, *meta.ShardGroupInfo) - } - - TSDBStore interface { - CreateShard(database, policy string, shardID uint64) error - WriteToShard(shardID uint64, points []tsdb.Point) error - CreateMapper(shardID uint64, query string, chunkSize int) (tsdb.Mapper, error) - } - - Logger *log.Logger -} - -// NewService returns a new instance of Service. -func NewService(c Config) *Service { - return &Service{ - closing: make(chan struct{}), - Logger: log.New(os.Stderr, "[tcp] ", log.LstdFlags), - } -} - -// Open opens the network listener and begins serving requests. -func (s *Service) Open() error { - - s.Logger.Println("Starting cluster service") - // Begin serving conections. - s.wg.Add(1) - go s.serve() - - return nil -} - -// SetLogger sets the internal logger to the logger passed in. -func (s *Service) SetLogger(l *log.Logger) { - s.Logger = l -} - -// serve accepts connections from the listener and handles them. -func (s *Service) serve() { - defer s.wg.Done() - - for { - // Check if the service is shutting down. - select { - case <-s.closing: - return - default: - } - - // Accept the next connection. - conn, err := s.Listener.Accept() - if err != nil { - if strings.Contains(err.Error(), "connection closed") { - s.Logger.Printf("cluster service accept error: %s", err) - return - } - s.Logger.Printf("accept error: %s", err) - continue - } - - // Delegate connection handling to a separate goroutine. - s.wg.Add(1) - go func() { - defer s.wg.Done() - s.handleConn(conn) - }() - } -} - -// Close shuts down the listener and waits for all connections to finish. -func (s *Service) Close() error { - if s.Listener != nil { - s.Listener.Close() - } - - // Shut down all handlers. - close(s.closing) - // s.wg.Wait() // FIXME(benbjohnson) - - return nil -} - -// handleConn services an individual TCP connection. -func (s *Service) handleConn(conn net.Conn) { - // Ensure connection is closed when service is closed. - closing := make(chan struct{}) - defer close(closing) - go func() { - select { - case <-closing: - case <-s.closing: - } - conn.Close() - }() - - s.Logger.Printf("accept remote write connection from %v\n", conn.RemoteAddr()) - defer func() { - s.Logger.Printf("close remote write connection from %v\n", conn.RemoteAddr()) - }() - for { - // Read type-length-value. - typ, buf, err := ReadTLV(conn) - if err != nil { - if strings.HasSuffix(err.Error(), "EOF") { - return - } - s.Logger.Printf("unable to read type-length-value %s", err) - return - } - - // Delegate message processing by type. 
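// Each request above arrives as a type-length-value record: one type byte, a
// big-endian int64 length, then the payload, which the switch below then
// dispatches on. A self-contained round trip of that framing, mirroring the
// ReadTLV/WriteTLV functions defined later in this file (lowercase names to
// keep the sketch standalone):
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

func writeTLV(w io.Writer, typ byte, buf []byte) error {
	if _, err := w.Write([]byte{typ}); err != nil {
		return err
	}
	if err := binary.Write(w, binary.BigEndian, int64(len(buf))); err != nil {
		return err
	}
	_, err := w.Write(buf)
	return err
}

func readTLV(r io.Reader) (byte, []byte, error) {
	var typ [1]byte
	if _, err := io.ReadFull(r, typ[:]); err != nil {
		return 0, nil, err
	}
	var sz int64
	if err := binary.Read(r, binary.BigEndian, &sz); err != nil {
		return 0, nil, err
	}
	buf := make([]byte, sz)
	_, err := io.ReadFull(r, buf)
	return typ[0], buf, err
}

func main() {
	var b bytes.Buffer
	writeTLV(&b, 2, []byte("payload"))
	typ, buf, _ := readTLV(&b)
	fmt.Println(typ, string(buf)) // 2 payload
}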
- switch typ { - case writeShardRequestMessage: - err := s.processWriteShardRequest(buf) - if err != nil { - s.Logger.Printf("process write shard error: %s", err) - } - s.writeShardResponse(conn, err) - case mapShardRequestMessage: - err := s.processMapShardRequest(conn, buf) - if err != nil { - s.Logger.Printf("process map shard error: %s", err) - if err := writeMapShardResponseMessage(conn, NewMapShardResponse(1, err.Error())); err != nil { - s.Logger.Printf("process map shard error writing response: %s", err.Error()) - } - } - default: - s.Logger.Printf("cluster service message type not found: %d", typ) - } - } -} - -func (s *Service) processWriteShardRequest(buf []byte) error { - // Build request - var req WriteShardRequest - if err := req.UnmarshalBinary(buf); err != nil { - return err - } - - err := s.TSDBStore.WriteToShard(req.ShardID(), req.Points()) - - // We may have received a write for a shard that we don't have locally because the - // sending node may have just created the shard (via the metastore) and the write - // arrived before the local store could create the shard. In this case, we need - // to check the metastore to determine what database and retention policy this - // shard should reside within. - if err == tsdb.ErrShardNotFound { - - // Query the metastore for the owner of this shard - database, retentionPolicy, sgi := s.MetaStore.ShardOwner(req.ShardID()) - if sgi == nil { - // If we can't find it, then we need to drop this request - // as it is no longer valid. This could happen if writes were queued via - // hinted handoff and delivered after a shard group was deleted. - s.Logger.Printf("drop write request: shard=%d", req.ShardID()) - return nil - } - - err = s.TSDBStore.CreateShard(database, retentionPolicy, req.ShardID()) - if err != nil { - return err - } - return s.TSDBStore.WriteToShard(req.ShardID(), req.Points()) - } - - if err != nil { - return fmt.Errorf("write shard %d: %s", req.ShardID(), err) - } - - return nil -} - -func (s *Service) writeShardResponse(w io.Writer, e error) { - // Build response. - var resp WriteShardResponse - if e != nil { - resp.SetCode(1) - resp.SetMessage(e.Error()) - } else { - resp.SetCode(0) - } - - // Marshal response to binary. - buf, err := resp.MarshalBinary() - if err != nil { - s.Logger.Printf("error marshalling shard response: %s", err) - return - } - - // Write to connection. - if err := WriteTLV(w, writeShardResponseMessage, buf); err != nil { - s.Logger.Printf("write shard response error: %s", err) - } -} - -func (s *Service) processMapShardRequest(w io.Writer, buf []byte) error { - // Decode request - var req MapShardRequest - if err := req.UnmarshalBinary(buf); err != nil { - return err - } - - m, err := s.TSDBStore.CreateMapper(req.ShardID(), req.Query(), int(req.ChunkSize())) - if err != nil { - return fmt.Errorf("create mapper: %s", err) - } - if m == nil { - return writeMapShardResponseMessage(w, NewMapShardResponse(0, "")) - } - - if err := m.Open(); err != nil { - return fmt.Errorf("mapper open: %s", err) - } - defer m.Close() - - var metaSent bool - for { - var resp MapShardResponse - - if !metaSent { - resp.SetTagSets(m.TagSets()) - resp.SetFields(m.Fields()) - metaSent = true - } - - chunk, err := m.NextChunk() - if err != nil { - return fmt.Errorf("next chunk: %s", err) - } - if chunk != nil { - b, err := json.Marshal(chunk) - if err != nil { - return fmt.Errorf("encoding: %s", err) - } - resp.SetData(b) - } - - // Write to connection. 
- resp.SetCode(0) - if err := writeMapShardResponseMessage(w, &resp); err != nil { - return err - } - - if chunk == nil { - // All mapper data sent. - return nil - } - } -} - -func writeMapShardResponseMessage(w io.Writer, msg *MapShardResponse) error { - buf, err := msg.MarshalBinary() - if err != nil { - return err - } - return WriteTLV(w, mapShardResponseMessage, buf) -} - -// ReadTLV reads a type-length-value record from r. -func ReadTLV(r io.Reader) (byte, []byte, error) { - var typ [1]byte - if _, err := io.ReadFull(r, typ[:]); err != nil { - return 0, nil, fmt.Errorf("read message type: %s", err) - } - - // Read the size of the message. - var sz int64 - if err := binary.Read(r, binary.BigEndian, &sz); err != nil { - return 0, nil, fmt.Errorf("read message size: %s", err) - } - - if sz == 0 { - return 0, nil, fmt.Errorf("invalid message size: %d", sz) - } - - if sz >= MaxMessageSize { - return 0, nil, fmt.Errorf("max message size of %d exceeded: %d", MaxMessageSize, sz) - } - - // Read the value. - buf := make([]byte, sz) - if _, err := io.ReadFull(r, buf); err != nil { - return 0, nil, fmt.Errorf("read message value: %s", err) - } - - return typ[0], buf, nil -} - -// WriteTLV writes a type-length-value record to w. -func WriteTLV(w io.Writer, typ byte, buf []byte) error { - if _, err := w.Write([]byte{typ}); err != nil { - return fmt.Errorf("write message type: %s", err) - } - - // Write the size of the message. - if err := binary.Write(w, binary.BigEndian, int64(len(buf))); err != nil { - return fmt.Errorf("write message size: %s", err) - } - - // Write the value. - if _, err := w.Write(buf); err != nil { - return fmt.Errorf("write message value: %s", err) - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service_test.go deleted file mode 100644 index 114135db8..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/service_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package cluster_test - -import ( - "fmt" - "net" - "time" - - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/tcp" - "github.com/influxdb/influxdb/tsdb" -) - -type metaStore struct { - host string -} - -func (m *metaStore) Node(nodeID uint64) (*meta.NodeInfo, error) { - return &meta.NodeInfo{ - ID: nodeID, - Host: m.host, - }, nil -} - -type testService struct { - nodeID uint64 - ln net.Listener - muxln net.Listener - writeShardFunc func(shardID uint64, points []tsdb.Point) error - createShardFunc func(database, policy string, shardID uint64) error - createMapperFunc func(shardID uint64, query string, chunkSize int) (tsdb.Mapper, error) -} - -func newTestWriteService(f func(shardID uint64, points []tsdb.Point) error) testService { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - panic(err) - } - - mux := tcp.NewMux() - muxln := mux.Listen(cluster.MuxHeader) - go mux.Serve(ln) - - return testService{ - writeShardFunc: f, - ln: ln, - muxln: muxln, - } -} - -func (ts *testService) Close() { - if ts.ln != nil { - ts.ln.Close() - } -} - -type serviceResponses []serviceResponse -type serviceResponse struct { - shardID uint64 - ownerID uint64 - points []tsdb.Point -} - -func (t testService) WriteToShard(shardID uint64, points []tsdb.Point) error { - return t.writeShardFunc(shardID, points) -} - -func (t testService) CreateShard(database, policy string, shardID uint64) error { - return t.createShardFunc(database, 
policy, shardID) -} - -func (t testService) CreateMapper(shardID uint64, query string, chunkSize int) (tsdb.Mapper, error) { - return t.createMapperFunc(shardID, query, chunkSize) -} - -func writeShardSuccess(shardID uint64, points []tsdb.Point) error { - responses <- &serviceResponse{ - shardID: shardID, - points: points, - } - return nil -} - -func writeShardFail(shardID uint64, points []tsdb.Point) error { - return fmt.Errorf("failed to write") -} - -var responses = make(chan *serviceResponse, 1024) - -func (testService) ResponseN(n int) ([]*serviceResponse, error) { - var a []*serviceResponse - for { - select { - case r := <-responses: - a = append(a, r) - if len(a) == n { - return a, nil - } - case <-time.After(time.Second): - return a, fmt.Errorf("unexpected response count: expected: %d, actual: %d", n, len(a)) - } - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper.go deleted file mode 100644 index 8756002d2..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper.go +++ /dev/null @@ -1,207 +0,0 @@ -package cluster - -import ( - "encoding/json" - "fmt" - "io" - "math/rand" - "net" - "time" - - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/tsdb" - "gopkg.in/fatih/pool.v2" -) - -// ShardMapper is responsible for providing mappers for requested shards. It is -// responsible for creating those mappers from the local store, or reaching -// out to another node on the cluster. -type ShardMapper struct { - ForceRemoteMapping bool // All shards treated as remote. Useful for testing. - - MetaStore interface { - NodeID() uint64 - Node(id uint64) (ni *meta.NodeInfo, err error) - } - - TSDBStore interface { - CreateMapper(shardID uint64, query string, chunkSize int) (tsdb.Mapper, error) - } - - timeout time.Duration - pool *clientPool -} - -// NewShardMapper returns a mapper of local and remote shards. -func NewShardMapper(timeout time.Duration) *ShardMapper { - return &ShardMapper{ - pool: newClientPool(), - timeout: timeout, - } -} - -// CreateMapper returns a Mapper for the given shard ID. -func (s *ShardMapper) CreateMapper(sh meta.ShardInfo, stmt string, chunkSize int) (tsdb.Mapper, error) { - var err error - var m tsdb.Mapper - if sh.OwnedBy(s.MetaStore.NodeID()) && !s.ForceRemoteMapping { - m, err = s.TSDBStore.CreateMapper(sh.ID, stmt, chunkSize) - if err != nil { - return nil, err - } - } else { - // Pick a node in a pseudo-random manner. - conn, err := s.dial(sh.OwnerIDs[rand.Intn(len(sh.OwnerIDs))]) - if err != nil { - return nil, err - } - conn.SetDeadline(time.Now().Add(s.timeout)) - - rm := NewRemoteMapper(conn.(*pool.PoolConn), sh.ID, stmt, chunkSize) - m = rm - } - - return m, nil -} - -func (s *ShardMapper) dial(nodeID uint64) (net.Conn, error) { - // If we don't have a connection pool for that addr yet, create one - _, ok := s.pool.getPool(nodeID) - if !ok { - factory := &connFactory{nodeID: nodeID, clientPool: s.pool, timeout: s.timeout} - factory.metaStore = s.MetaStore - - p, err := pool.NewChannelPool(1, 3, factory.dial) - if err != nil { - return nil, err - } - s.pool.setPool(nodeID, p) - } - return s.pool.conn(nodeID) -} - -type remoteShardConn interface { - io.ReadWriter - Close() error - MarkUnusable() -} - -// RemoteMapper implements the tsdb.Mapper interface. It connects to a remote node, -// sends a query, and interprets the stream of data that comes back. 
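// dial above lazily creates one fatih/pool channel pool per node (1 idle
// connection, at most 3) and then borrows from it. A minimal standalone
// sketch of that pool usage against a throwaway local listener; only the
// NewChannelPool/Get/Close calls already used in this file are assumed:
package main

import (
	"fmt"
	"net"

	"gopkg.in/fatih/pool.v2"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	go func() { // accept and hold connections so dials succeed
		for {
			if _, err := ln.Accept(); err != nil {
				return
			}
		}
	}()

	factory := func() (net.Conn, error) { return net.Dial("tcp", ln.Addr().String()) }
	p, err := pool.NewChannelPool(1, 3, factory) // initial idle conns, max capacity
	if err != nil {
		panic(err)
	}
	defer p.Close()

	conn, err := p.Get() // borrowed; Close() returns it to the pool
	if err != nil {
		panic(err)
	}
	fmt.Println("pooled conn to", conn.RemoteAddr())
	conn.Close()
}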
-type RemoteMapper struct { - shardID uint64 - stmt string - chunkSize int - - tagsets []string - fields []string - - conn remoteShardConn - bufferedResponse *MapShardResponse -} - -// NewRemoteMapper returns a new remote mapper using the given connection. -func NewRemoteMapper(c remoteShardConn, shardID uint64, stmt string, chunkSize int) *RemoteMapper { - return &RemoteMapper{ - conn: c, - shardID: shardID, - stmt: stmt, - chunkSize: chunkSize, - } -} - -// Open connects to the remote node and starts receiving data. -func (r *RemoteMapper) Open() (err error) { - defer func() { - if err != nil { - r.conn.Close() - } - }() - // Build Map request. - var request MapShardRequest - request.SetShardID(r.shardID) - request.SetQuery(r.stmt) - request.SetChunkSize(int32(r.chunkSize)) - - // Marshal into protocol buffers. - buf, err := request.MarshalBinary() - if err != nil { - return err - } - - // Write request. - if err := WriteTLV(r.conn, mapShardRequestMessage, buf); err != nil { - r.conn.MarkUnusable() - return err - } - - // Read the response. - _, buf, err = ReadTLV(r.conn) - if err != nil { - r.conn.MarkUnusable() - return err - } - - // Unmarshal response. - r.bufferedResponse = &MapShardResponse{} - if err := r.bufferedResponse.UnmarshalBinary(buf); err != nil { - return err - } - - if r.bufferedResponse.Code() != 0 { - return fmt.Errorf("error code %d: %s", r.bufferedResponse.Code(), r.bufferedResponse.Message()) - } - - // Decode the first response to get the TagSets. - r.tagsets = r.bufferedResponse.TagSets() - - return nil -} - -func (r *RemoteMapper) TagSets() []string { - return r.tagsets -} - -func (r *RemoteMapper) Fields() []string { - return r.fields -} - -// NextChunk returns the next chunk read from the remote node to the client. -func (r *RemoteMapper) NextChunk() (chunk interface{}, err error) { - output := &tsdb.MapperOutput{} - var response *MapShardResponse - - if r.bufferedResponse != nil { - response = r.bufferedResponse - r.bufferedResponse = nil - } else { - response = &MapShardResponse{} - - // Read the response. - _, buf, err := ReadTLV(r.conn) - if err != nil { - r.conn.MarkUnusable() - return nil, err - } - - // Unmarshal response. - if err := response.UnmarshalBinary(buf); err != nil { - return nil, err - } - - if response.Code() != 0 { - return nil, fmt.Errorf("error code %d: %s", response.Code(), response.Message()) - } - } - - if response.Data() == nil { - return nil, nil - } - err = json.Unmarshal(response.Data(), output) - return output, err -} - -// Close the Mapper -func (r *RemoteMapper) Close() { - r.conn.Close() -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper_test.go deleted file mode 100644 index afb35ef15..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_mapper_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package cluster - -import ( - "bytes" - "encoding/json" - "io" - "testing" - - "github.com/influxdb/influxdb/tsdb" -) - -// remoteShardResponder implements the remoteShardConn interface. -type remoteShardResponder struct { - t *testing.T - rxBytes []byte - - buffer *bytes.Buffer -} - -func newRemoteShardResponder(outputs []*tsdb.MapperOutput, tagsets []string) *remoteShardResponder { - r := &remoteShardResponder{} - a := make([]byte, 0, 1024) - r.buffer = bytes.NewBuffer(a) - - // Pump the outputs in the buffer for later reading. 
- for _, o := range outputs { - resp := &MapShardResponse{} - resp.SetCode(0) - if o != nil { - d, _ := json.Marshal(o) - resp.SetData(d) - resp.SetTagSets(tagsets) - } - - g, _ := resp.MarshalBinary() - WriteTLV(r.buffer, mapShardResponseMessage, g) - } - - return r -} - -func (r remoteShardResponder) MarkUnusable() { return } -func (r remoteShardResponder) Close() error { return nil } -func (r remoteShardResponder) Read(p []byte) (n int, err error) { - return io.ReadFull(r.buffer, p) -} - -func (r remoteShardResponder) Write(p []byte) (n int, err error) { - if r.rxBytes == nil { - r.rxBytes = make([]byte, 0) - } - r.rxBytes = append(r.rxBytes, p...) - return len(p), nil -} - -// Ensure a RemoteMapper can process valid responses from a remote shard. -func TestShardWriter_RemoteMapper_Success(t *testing.T) { - expTagSets := []string{"tagsetA"} - expOutput := &tsdb.MapperOutput{ - Name: "cpu", - Tags: map[string]string{"host": "serverA"}, - } - - c := newRemoteShardResponder([]*tsdb.MapperOutput{expOutput, nil}, expTagSets) - - r := NewRemoteMapper(c, 1234, "SELECT * FROM CPU", 10) - if err := r.Open(); err != nil { - t.Fatalf("failed to open remote mapper: %s", err.Error()) - } - - if r.TagSets()[0] != expTagSets[0] { - t.Fatalf("incorrect tagsets received, exp %v, got %v", expTagSets, r.TagSets()) - } - - // Get first chunk from mapper. - chunk, err := r.NextChunk() - if err != nil { - t.Fatalf("failed to get next chunk from mapper: %s", err.Error()) - } - output, ok := chunk.(*tsdb.MapperOutput) - if !ok { - t.Fatal("chunk is not of expected type") - } - if output.Name != "cpu" { - t.Fatalf("received output incorrect, exp: %v, got %v", expOutput, output) - } - - // Next chunk should be nil, indicating no more data. - chunk, err = r.NextChunk() - if err != nil { - t.Fatalf("failed to get next chunk from mapper: %s", err.Error()) - } - if chunk != nil { - t.Fatal("received more chunks when none expected") - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer.go deleted file mode 100644 index a0f317be9..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer.go +++ /dev/null @@ -1,163 +0,0 @@ -package cluster - -import ( - "fmt" - "net" - "time" - - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/tsdb" - "gopkg.in/fatih/pool.v2" -) - -const ( - writeShardRequestMessage byte = iota + 1 - writeShardResponseMessage - mapShardRequestMessage - mapShardResponseMessage -) - -// ShardWriter writes a set of points to a shard. -type ShardWriter struct { - pool *clientPool - timeout time.Duration - - MetaStore interface { - Node(id uint64) (ni *meta.NodeInfo, err error) - } -} - -// NewShardWriter returns a new instance of ShardWriter. -func NewShardWriter(timeout time.Duration) *ShardWriter { - return &ShardWriter{ - pool: newClientPool(), - timeout: timeout, - } -} - -func (w *ShardWriter) WriteShard(shardID, ownerID uint64, points []tsdb.Point) error { - c, err := w.dial(ownerID) - if err != nil { - return err - } - - conn, ok := c.(*pool.PoolConn) - if !ok { - panic("wrong connection type") - } - defer func(conn net.Conn) { - conn.Close() // return to pool - }(conn) - - // Build write request. - var request WriteShardRequest - request.SetShardID(shardID) - request.AddPoints(points) - - // Marshal into protocol buffers. - buf, err := request.MarshalBinary() - if err != nil { - return err - } - - // Write request. 
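// WriteShard below re-arms an absolute deadline before each network operation
// (SetWriteDeadline / SetReadDeadline with time.Now().Add(timeout)). The
// idiom in isolation; net.Pipe stands in for the pooled connection:
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	client, server := net.Pipe()
	defer server.Close()
	defer client.Close()

	// Nothing reads the other end, so the write must hit the deadline.
	client.SetWriteDeadline(time.Now().Add(50 * time.Millisecond))
	_, err := client.Write([]byte("hello"))
	fmt.Println(err) // i/o timeout
}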
- conn.SetWriteDeadline(time.Now().Add(w.timeout)) - if err := WriteTLV(conn, writeShardRequestMessage, buf); err != nil { - conn.MarkUnusable() - return err - } - - // Read the response. - conn.SetReadDeadline(time.Now().Add(w.timeout)) - _, buf, err = ReadTLV(conn) - if err != nil { - conn.MarkUnusable() - return err - } - - // Unmarshal response. - var response WriteShardResponse - if err := response.UnmarshalBinary(buf); err != nil { - return err - } - - if response.Code() != 0 { - return fmt.Errorf("error code %d: %s", response.Code(), response.Message()) - } - - return nil -} - -func (c *ShardWriter) dial(nodeID uint64) (net.Conn, error) { - // If we don't have a connection pool for that addr yet, create one - _, ok := c.pool.getPool(nodeID) - if !ok { - factory := &connFactory{nodeID: nodeID, clientPool: c.pool, timeout: c.timeout} - factory.metaStore = c.MetaStore - - p, err := pool.NewChannelPool(1, 3, factory.dial) - if err != nil { - return nil, err - } - c.pool.setPool(nodeID, p) - } - return c.pool.conn(nodeID) -} - -func (w *ShardWriter) Close() error { - if w.pool == nil { - return fmt.Errorf("client already closed") - } - w.pool.close() - w.pool = nil - return nil -} - -const ( - maxConnections = 500 - maxRetries = 3 -) - -var errMaxConnectionsExceeded = fmt.Errorf("can not exceed max connections of %d", maxConnections) - -type connFactory struct { - nodeID uint64 - timeout time.Duration - - clientPool interface { - size() int - } - - metaStore interface { - Node(id uint64) (ni *meta.NodeInfo, err error) - } -} - -func (c *connFactory) dial() (net.Conn, error) { - if c.clientPool.size() > maxConnections { - return nil, errMaxConnectionsExceeded - } - - ni, err := c.metaStore.Node(c.nodeID) - if err != nil { - return nil, err - } - - if ni == nil { - return nil, fmt.Errorf("node %d does not exist", c.nodeID) - } - - conn, err := net.DialTimeout("tcp", ni.Host, c.timeout) - if err != nil { - return nil, err - } - - // Write a marker byte for cluster messages. - _, err = conn.Write([]byte{MuxHeader}) - if err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer_test.go deleted file mode 100644 index d994315ca..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cluster/shard_writer_test.go +++ /dev/null @@ -1,186 +0,0 @@ -package cluster_test - -import ( - "net" - "strings" - "testing" - "time" - - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/tsdb" -) - -// Ensure the shard writer can successful write a single request. -func TestShardWriter_WriteShard_Success(t *testing.T) { - ts := newTestWriteService(writeShardSuccess) - s := cluster.NewService(cluster.Config{}) - s.Listener = ts.muxln - s.TSDBStore = ts - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer s.Close() - defer ts.Close() - - w := cluster.NewShardWriter(time.Minute) - w.MetaStore = &metaStore{host: ts.ln.Addr().String()} - - // Build a single point. - now := time.Now() - var points []tsdb.Point - points = append(points, tsdb.NewPoint("cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now)) - - // Write to shard and close. - if err := w.WriteShard(1, 2, points); err != nil { - t.Fatal(err) - } else if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Validate response. 
- responses, err := ts.ResponseN(1) - if err != nil { - t.Fatal(err) - } else if responses[0].shardID != 1 { - t.Fatalf("unexpected shard id: %d", responses[0].shardID) - } - - // Validate point. - if p := responses[0].points[0]; p.Name() != "cpu" { - t.Fatalf("unexpected name: %s", p.Name()) - } else if p.Fields()["value"] != int64(100) { - t.Fatalf("unexpected 'value' field: %d", p.Fields()["value"]) - } else if p.Tags()["host"] != "server01" { - t.Fatalf("unexpected 'host' tag: %s", p.Tags()["host"]) - } else if p.Time().UnixNano() != now.UnixNano() { - t.Fatalf("unexpected time: %s", p.Time()) - } -} - -// Ensure the shard writer can successful write a multiple requests. -func TestShardWriter_WriteShard_Multiple(t *testing.T) { - ts := newTestWriteService(writeShardSuccess) - s := cluster.NewService(cluster.Config{}) - s.Listener = ts.muxln - s.TSDBStore = ts - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer s.Close() - defer ts.Close() - - w := cluster.NewShardWriter(time.Minute) - w.MetaStore = &metaStore{host: ts.ln.Addr().String()} - - // Build a single point. - now := time.Now() - var points []tsdb.Point - points = append(points, tsdb.NewPoint("cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now)) - - // Write to shard twice and close. - if err := w.WriteShard(1, 2, points); err != nil { - t.Fatal(err) - } else if err := w.WriteShard(1, 2, points); err != nil { - t.Fatal(err) - } else if err := w.Close(); err != nil { - t.Fatal(err) - } - - // Validate response. - responses, err := ts.ResponseN(1) - if err != nil { - t.Fatal(err) - } else if responses[0].shardID != 1 { - t.Fatalf("unexpected shard id: %d", responses[0].shardID) - } - - // Validate point. - if p := responses[0].points[0]; p.Name() != "cpu" { - t.Fatalf("unexpected name: %s", p.Name()) - } else if p.Fields()["value"] != int64(100) { - t.Fatalf("unexpected 'value' field: %d", p.Fields()["value"]) - } else if p.Tags()["host"] != "server01" { - t.Fatalf("unexpected 'host' tag: %s", p.Tags()["host"]) - } else if p.Time().UnixNano() != now.UnixNano() { - t.Fatalf("unexpected time: %s", p.Time()) - } -} - -// Ensure the shard writer returns an error when the server fails to accept the write. -func TestShardWriter_WriteShard_Error(t *testing.T) { - ts := newTestWriteService(writeShardFail) - s := cluster.NewService(cluster.Config{}) - s.Listener = ts.muxln - s.TSDBStore = ts - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer s.Close() - defer ts.Close() - - w := cluster.NewShardWriter(time.Minute) - w.MetaStore = &metaStore{host: ts.ln.Addr().String()} - now := time.Now() - - shardID := uint64(1) - ownerID := uint64(2) - var points []tsdb.Point - points = append(points, tsdb.NewPoint( - "cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, - )) - - if err := w.WriteShard(shardID, ownerID, points); err == nil || err.Error() != "error code 1: write shard 1: failed to write" { - t.Fatalf("unexpected error: %v", err) - } -} - -// Ensure the shard writer returns an error when dialing times out. 
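// The dial-timeout test below forces the failure path by dialing with a
// nanosecond budget, which cannot complete even a loopback handshake. The
// trick on its own:
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	_, err = net.DialTimeout("tcp", ln.Addr().String(), time.Nanosecond)
	fmt.Println(err) // dial tcp ...: i/o timeout
}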
-func TestShardWriter_Write_ErrDialTimeout(t *testing.T) { - ts := newTestWriteService(writeShardSuccess) - s := cluster.NewService(cluster.Config{}) - s.Listener = ts.muxln - s.TSDBStore = ts - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer s.Close() - defer ts.Close() - - w := cluster.NewShardWriter(time.Nanosecond) - w.MetaStore = &metaStore{host: ts.ln.Addr().String()} - now := time.Now() - - shardID := uint64(1) - ownerID := uint64(2) - var points []tsdb.Point - points = append(points, tsdb.NewPoint( - "cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, - )) - - if err, exp := w.WriteShard(shardID, ownerID, points), "i/o timeout"; err == nil || !strings.Contains(err.Error(), exp) { - t.Fatalf("expected error %v, to contain %s", err, exp) - } -} - -// Ensure the shard writer returns an error when reading times out. -func TestShardWriter_Write_ErrReadTimeout(t *testing.T) { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - - w := cluster.NewShardWriter(time.Millisecond) - w.MetaStore = &metaStore{host: ln.Addr().String()} - now := time.Now() - - shardID := uint64(1) - ownerID := uint64(2) - var points []tsdb.Point - points = append(points, tsdb.NewPoint( - "cpu", tsdb.Tags{"host": "server01"}, map[string]interface{}{"value": int64(100)}, now, - )) - - if err := w.WriteShard(shardID, ownerID, points); err == nil || !strings.Contains(err.Error(), "i/o timeout") { - t.Fatalf("unexpected error: %s", err) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main.go deleted file mode 100644 index 24af8efa4..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main.go +++ /dev/null @@ -1,724 +0,0 @@ -package main - -import ( - "encoding/csv" - "encoding/json" - "flag" - "fmt" - "io" - "net" - "net/url" - "os" - "os/user" - "path/filepath" - "sort" - "strconv" - "strings" - "text/tabwriter" - - "github.com/influxdb/influxdb/client" - "github.com/influxdb/influxdb/importer/v8" - "github.com/peterh/liner" -) - -// These variables are populated via the Go linker. -var ( - version string = "0.9" -) - -const ( - // defaultFormat is the default format of the results when issuing queries - defaultFormat = "column" - - // defaultPPS is the default points per second that the import will throttle at - // by default it's 0, which means it will not throttle - defaultPPS = 0 -) - -type CommandLine struct { - Client *client.Client - Line *liner.State - Host string - Port int - Username string - Password string - Database string - Ssl bool - RetentionPolicy string - Version string - Pretty bool // controls pretty print for json - Format string // controls the output format. Valid values are json, csv, or column - Execute string - ShowVersion bool - Import bool - PPS int // Controls how many points per second the import will allow via throttling - Path string - Compressed bool -} - -func main() { - c := CommandLine{} - - fs := flag.NewFlagSet("InfluxDB shell version "+version, flag.ExitOnError) - fs.StringVar(&c.Host, "host", client.DefaultHost, "Influxdb host to connect to.") - fs.IntVar(&c.Port, "port", client.DefaultPort, "Influxdb port to connect to.") - fs.StringVar(&c.Username, "username", c.Username, "Username to connect to the server.") - fs.StringVar(&c.Password, "password", c.Password, `Password to connect to the server. 
Leaving blank will prompt for password (--password="").`) - fs.StringVar(&c.Database, "database", c.Database, "Database to connect to the server.") - fs.BoolVar(&c.Ssl, "ssl", false, "Use https for connecting to cluster.") - fs.StringVar(&c.Format, "format", defaultFormat, "Format specifies the format of the server responses: json, csv, or column.") - fs.BoolVar(&c.Pretty, "pretty", false, "Turns on pretty print for the json format.") - fs.StringVar(&c.Execute, "execute", c.Execute, "Execute command and quit.") - fs.BoolVar(&c.ShowVersion, "version", false, "Displays the InfluxDB version.") - fs.BoolVar(&c.Import, "import", false, "Import a previous database.") - fs.IntVar(&c.PPS, "pps", defaultPPS, "How many points per second the import will allow. By default it is zero and will not throttle importing.") - fs.StringVar(&c.Path, "path", "", "path to the file to import") - fs.BoolVar(&c.Compressed, "compressed", false, "set to true if the import file is compressed") - - // Define our own custom usage to print - fs.Usage = func() { - fmt.Println(`Usage of influx: - -version - Display the version and exit. - -host 'host name' - Host to connect to. - -port 'port #' - Port to connect to. - -database 'database name' - Database to connect to the server. - -password 'password' - Password to connect to the server. Leaving blank will prompt for password (--password ''). - -username 'username' - Username to connect to the server. - -ssl - Use https for requests. - -execute 'command' - Execute command and quit. - -format 'json|csv|column' - Format specifies the format of the server responses: json, csv, or column. - -pretty - Turns on pretty print for the json format. - -import - Import a previous database export from file - -pps - How many points per second the import will allow. By default it is zero and will not throttle importing. 
- -path - Path to file to import - -compressed - Set to true if the import file is compressed - -Examples: - - # Use influx in a non-interactive mode to query the database "metrics" and pretty print json: - $ influx -database 'metrics' -execute 'select * from cpu' -format 'json' -pretty - - # Connect to a specific database on startup and set database context: - $ influx -database 'metrics' -host 'localhost' -port '8086' -`) - } - fs.Parse(os.Args[1:]) - - if c.ShowVersion { - showVersion() - os.Exit(0) - } - - var promptForPassword bool - // determine if they set the password flag but provided no value - for _, v := range os.Args { - v = strings.ToLower(v) - if (strings.HasPrefix(v, "-password") || strings.HasPrefix(v, "--password")) && c.Password == "" { - promptForPassword = true - break - } - } - - c.Line = liner.NewLiner() - defer c.Line.Close() - - if promptForPassword { - p, e := c.Line.PasswordPrompt("password: ") - if e != nil { - fmt.Println("Unable to parse password.") - } else { - c.Password = p - } - } - - if err := c.connect(""); err != nil { - - } - if c.Execute == "" && !c.Import { - fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.Version) - } - - if c.Execute != "" { - if err := c.ExecuteQuery(c.Execute); err != nil { - c.Line.Close() - os.Exit(1) - } - c.Line.Close() - os.Exit(0) - } - - if c.Import { - path := net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) - u, e := client.ParseConnectionString(path, c.Ssl) - if e != nil { - fmt.Println(e) - return - } - - config := v8.NewConfig() - config.Username = c.Username - config.Password = c.Password - config.Precision = "ns" - config.WriteConsistency = "any" - config.Path = c.Path - config.Version = version - config.URL = u - config.Compressed = c.Compressed - config.PPS = c.PPS - - i := v8.NewImporter(config) - if err := i.Import(); err != nil { - fmt.Printf("ERROR: %s\n", err) - c.Line.Close() - os.Exit(1) - } - c.Line.Close() - os.Exit(0) - } - - showVersion() - - var historyFile string - usr, err := user.Current() - // Only load history if we can get the user - if err == nil { - historyFile = filepath.Join(usr.HomeDir, ".influx_history") - - if f, err := os.Open(historyFile); err == nil { - c.Line.ReadHistory(f) - f.Close() - } - } - - for { - l, e := c.Line.Prompt("> ") - if e != nil { - break - } - if c.ParseCommand(l) { - // write out the history - if len(historyFile) > 0 { - c.Line.AppendHistory(l) - if f, err := os.Create(historyFile); err == nil { - c.Line.WriteHistory(f) - f.Close() - } - } - } else { - break // exit main loop - } - } -} - -func showVersion() { - fmt.Println("InfluxDB shell " + version) -} - -func (c *CommandLine) ParseCommand(cmd string) bool { - lcmd := strings.TrimSpace(strings.ToLower(cmd)) - switch { - case strings.HasPrefix(lcmd, "exit"): - // signal the program to exit - return false - case strings.HasPrefix(lcmd, "gopher"): - c.gopher() - case strings.HasPrefix(lcmd, "connect"): - c.connect(cmd) - case strings.HasPrefix(lcmd, "auth"): - c.SetAuth(cmd) - case strings.HasPrefix(lcmd, "help"): - c.help() - case strings.HasPrefix(lcmd, "format"): - c.SetFormat(cmd) - case strings.HasPrefix(lcmd, "settings"): - c.Settings() - case strings.HasPrefix(lcmd, "pretty"): - c.Pretty = !c.Pretty - if c.Pretty { - fmt.Println("Pretty print enabled") - } else { - fmt.Println("Pretty print disabled") - } - case strings.HasPrefix(lcmd, "use"): - c.use(cmd) - case strings.HasPrefix(lcmd, "insert"): - c.Insert(cmd) - case lcmd == "": - break - default: - c.ExecuteQuery(cmd) - } - return true -} - 
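ParseCommand's contract (return false only on exit, true otherwise) is what lets the prompt loop in main stay a plain for/break. A self-contained sketch of the same prefix-dispatch pattern; the names here are illustrative, not from the original:

package main

import (
    "bufio"
    "fmt"
    "os"
    "strings"
)

// parse mimics ParseCommand: dispatch on a normalized prefix and
// report false only when the loop should stop.
func parse(cmd string) bool {
    lcmd := strings.TrimSpace(strings.ToLower(cmd))
    switch {
    case strings.HasPrefix(lcmd, "exit"):
        return false
    case strings.HasPrefix(lcmd, "help"):
        fmt.Println("commands: help, exit")
    case lcmd == "":
        // blank input is a no-op, but keeps the loop running
    default:
        fmt.Printf("would execute query: %s\n", cmd)
    }
    return true
}

func main() {
    in := bufio.NewScanner(os.Stdin)
    fmt.Print("> ")
    for in.Scan() && parse(in.Text()) {
        fmt.Print("> ")
    }
}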
-func (c *CommandLine) connect(cmd string) error { - var cl *client.Client - var u url.URL - - // Remove the "connect" keyword if it exists - path := strings.TrimSpace(strings.Replace(cmd, "connect", "", -1)) - - // If they didn't provide a connection string, use the current settings - if path == "" { - path = net.JoinHostPort(c.Host, strconv.Itoa(c.Port)) - } - - var e error - u, e = client.ParseConnectionString(path, c.Ssl) - if e != nil { - return e - } - - config := client.NewConfig() - config.URL = u - config.Username = c.Username - config.Password = c.Password - config.UserAgent = "InfluxDBShell/" + version - cl, err := client.NewClient(config) - if err != nil { - return fmt.Errorf("Could not create client %s", err) - } - c.Client = cl - if _, v, e := c.Client.Ping(); e != nil { - return fmt.Errorf("Failed to connect to %s\n", c.Client.Addr()) - } else { - c.Version = v - } - return nil -} - -func (c *CommandLine) SetAuth(cmd string) { - // If they pass in the entire command, we should parse it - // auth <username> <password> - args := strings.Fields(cmd) - if len(args) == 3 { - args = args[1:] - } else { - args = []string{} - } - - if len(args) == 2 { - c.Username = args[0] - c.Password = args[1] - } else { - u, e := c.Line.Prompt("username: ") - if e != nil { - fmt.Printf("Unable to process input: %s", e) - return - } - c.Username = strings.TrimSpace(u) - p, e := c.Line.PasswordPrompt("password: ") - if e != nil { - fmt.Printf("Unable to process input: %s", e) - return - } - c.Password = p - } - - // Update the client as well - c.Client.SetAuth(c.Username, c.Password) -} - -func (c *CommandLine) use(cmd string) { - args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ") - if len(args) != 2 { - fmt.Printf("Could not parse database name from %q.\n", cmd) - return - } - d := args[1] - c.Database = d - fmt.Printf("Using database %s\n", d) -} - -func (c *CommandLine) SetFormat(cmd string) { - // Remove the "format" keyword if it exists - cmd = strings.TrimSpace(strings.Replace(cmd, "format", "", -1)) - // normalize cmd - cmd = strings.ToLower(cmd) - - switch cmd { - case "json", "csv", "column": - c.Format = cmd - default: - fmt.Printf("Unknown format %q. Please use json, csv, or column.\n", cmd) - } -} - -// isWhitespace returns true if the rune is a space, tab, or newline. -func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' } - -// isLetter returns true if the rune is a letter. -func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') } - -// isDigit returns true if the rune is a digit. -func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') } - -// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifier. -func isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' } - -// isNotIdentChar returns true if the rune cannot be used in an unquoted identifier.
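A standalone illustration of how this predicate drives the identifier splitting in parseUnquotedIdentifier, defined next. The predicate is restated inline so the snippet runs on its own:

package main

import (
    "fmt"
    "strings"
)

// Inline restatement of isNotIdentChar, for a runnable demo.
func isNotIdentChar(ch rune) bool {
    return !(ch >= 'a' && ch <= 'z' || ch >= 'A' && ch <= 'Z' ||
        ch >= '0' && ch <= '9' || ch == '_')
}

func main() {
    stmt := "cpu,host=serverA value=1.0"
    // FieldsFunc splits on every non-identifier rune, so the first
    // field is the leading identifier and TrimPrefix yields the rest.
    fields := strings.FieldsFunc(stmt, isNotIdentChar)
    fmt.Printf("ident=%q\n", fields[0])                          // ident="cpu"
    fmt.Printf("rest=%q\n", strings.TrimPrefix(stmt, fields[0])) // rest=",host=serverA value=1.0"
}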
-func isNotIdentChar(ch rune) bool { return !(isLetter(ch) || isDigit(ch) || ch == '_') } - -func parseUnquotedIdentifier(stmt string) (string, string) { - if fields := strings.FieldsFunc(stmt, isNotIdentChar); len(fields) > 0 { - return fields[0], strings.TrimPrefix(stmt, fields[0]) - } - return "", stmt -} - -func parseDoubleQuotedIdentifier(stmt string) (string, string) { - escapeNext := false - fields := strings.FieldsFunc(stmt, func(ch rune) bool { - if ch == '\\' { - escapeNext = true - } else if ch == '"' { - if !escapeNext { - return true - } - escapeNext = false - } - return false - }) - if len(fields) > 0 { - return fields[0], strings.TrimPrefix(stmt, "\""+fields[0]+"\"") - } - return "", stmt -} - -func parseNextIdentifier(stmt string) (ident, remainder string) { - if len(stmt) > 0 { - switch { - case isWhitespace(rune(stmt[0])): - return parseNextIdentifier(stmt[1:]) - case isIdentFirstChar(rune(stmt[0])): - return parseUnquotedIdentifier(stmt) - case stmt[0] == '"': - return parseDoubleQuotedIdentifier(stmt) - } - } - return "", stmt -} - -func (c *CommandLine) parseInto(stmt string) string { - ident, stmt := parseNextIdentifier(stmt) - if strings.HasPrefix(stmt, ".") { - c.Database = ident - fmt.Printf("Using database %s\n", c.Database) - ident, stmt = parseNextIdentifier(stmt[1:]) - } - if strings.HasPrefix(stmt, " ") { - c.RetentionPolicy = ident - fmt.Printf("Using retention policy %s\n", c.RetentionPolicy) - return stmt[1:] - } - return stmt -} - -func (c *CommandLine) Insert(stmt string) error { - i, point := parseNextIdentifier(stmt) - if !strings.EqualFold(i, "insert") { - fmt.Printf("ERR: found %s, expected INSERT\n", i) - return nil - } - if i, r := parseNextIdentifier(point); strings.EqualFold(i, "into") { - point = c.parseInto(r) - } - _, err := c.Client.Write(client.BatchPoints{ - Points: []client.Point{ - client.Point{Raw: point}, - }, - Database: c.Database, - RetentionPolicy: c.RetentionPolicy, - Precision: "n", - WriteConsistency: client.ConsistencyAny, - }) - if err != nil { - fmt.Printf("ERR: %s\n", err) - if c.Database == "" { - fmt.Println("Note: error may be due to not setting a database or retention policy.") - fmt.Println(`Please set a database with the command "use <database>" or`) - fmt.Println("INSERT INTO <database>.<retention policy> <point>") - } - return err - } - return nil -} - -func (c *CommandLine) ExecuteQuery(query string) error { - response, err := c.Client.Query(client.Query{Command: query, Database: c.Database}) - if err != nil { - fmt.Printf("ERR: %s\n", err) - return err - } - c.FormatResponse(response, os.Stdout) - if err := response.Error(); err != nil { - fmt.Printf("ERR: %s\n", response.Error()) - if c.Database == "" { - fmt.Println("Warning: It is possible this error is due to not setting a database.") - fmt.Println(`Please set a database with the command "use <database>".`) - } - return err - } - return nil -} - -func (c *CommandLine) FormatResponse(response *client.Response, w io.Writer) { - switch c.Format { - case "json": - c.writeJSON(response, w) - case "csv": - c.writeCSV(response, w) - case "column": - c.writeColumns(response, w) - default: - fmt.Fprintf(w, "Unknown output format %q.\n", c.Format) - } -} - -func (c *CommandLine) writeJSON(response *client.Response, w io.Writer) { - var data []byte - var err error - if c.Pretty { - data, err = json.MarshalIndent(response, "", " ") - } else { - data, err = json.Marshal(response) - } - if err != nil { - fmt.Fprintf(w, "Unable to parse json: %s\n", err) - return - } - fmt.Fprintln(w, string(data)) -} - -func (c *CommandLine) writeCSV(response *client.Response, w io.Writer) { - csvw := csv.NewWriter(w) - for _, result := range response.Results { - // Create a tabbed writer for each result as they won't always line up - rows := c.formatResults(result, "\t") - for _, r := range rows { - csvw.Write(strings.Split(r, "\t")) - } - csvw.Flush() - } -} - -func (c *CommandLine) writeColumns(response *client.Response, w io.Writer) { - for _, result := range response.Results { - // Create a tabbed writer for each result as they won't always line up - w := new(tabwriter.Writer) - w.Init(os.Stdout, 0, 8, 1, '\t', 0) - csv := c.formatResults(result, "\t") - for _, r := range csv { - fmt.Fprintln(w, r) - } - w.Flush() - } -} - -// formatResults will behave differently if you are formatting for columns or csv -func (c *CommandLine) formatResults(result client.Result, separator string) []string { - rows := []string{} - // Create a tabbed writer for each result as they won't always line up - for i, row := range result.Series { - // gather tags - tags := []string{} - for k, v := range row.Tags { - tags = append(tags, fmt.Sprintf("%s=%s", k, v)) - sort.Strings(tags) - } - - columnNames := []string{} - - // Only put name/tags in a column if format is csv - if c.Format == "csv" { - if len(tags) > 0 { - columnNames = append([]string{"tags"}, columnNames...) - } - - if row.Name != "" { - columnNames = append([]string{"name"}, columnNames...)
- } - } - - for _, column := range row.Columns { - columnNames = append(columnNames, column) - } - - // Output a line separator if we have more than one set or results and format is column - if i > 0 && c.Format == "column" { - rows = append(rows, "") - } - - // If we are column format, we break out the name/tag to seperate lines - if c.Format == "column" { - if row.Name != "" { - n := fmt.Sprintf("name: %s", row.Name) - rows = append(rows, n) - if len(tags) == 0 { - l := strings.Repeat("-", len(n)) - rows = append(rows, l) - } - } - if len(tags) > 0 { - t := fmt.Sprintf("tags: %s", (strings.Join(tags, ", "))) - rows = append(rows, t) - } - } - - rows = append(rows, strings.Join(columnNames, separator)) - - // if format is column, break tags to their own line/format - if c.Format == "column" && len(tags) > 0 { - lines := []string{} - for _, columnName := range columnNames { - lines = append(lines, strings.Repeat("-", len(columnName))) - } - rows = append(rows, strings.Join(lines, separator)) - } - - for _, v := range row.Values { - var values []string - if c.Format == "csv" { - if row.Name != "" { - values = append(values, row.Name) - } - if len(tags) > 0 { - values = append(values, strings.Join(tags, ",")) - } - } - - for _, vv := range v { - values = append(values, interfaceToString(vv)) - } - rows = append(rows, strings.Join(values, separator)) - } - // Outout a line separator if in column format - if c.Format == "column" { - rows = append(rows, "") - } - } - return rows -} - -func interfaceToString(v interface{}) string { - switch t := v.(type) { - case nil: - return "" - case bool: - return fmt.Sprintf("%v", v) - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr: - return fmt.Sprintf("%d", t) - case float32, float64: - return fmt.Sprintf("%v", t) - default: - return fmt.Sprintf("%v", t) - } -} - -func (c *CommandLine) Settings() { - w := new(tabwriter.Writer) - w.Init(os.Stdout, 0, 8, 1, '\t', 0) - if c.Port > 0 { - fmt.Fprintf(w, "Host\t%s:%d\n", c.Host, c.Port) - } else { - fmt.Fprintf(w, "Host\t%s\n", c.Host) - } - fmt.Fprintf(w, "Username\t%s\n", c.Username) - fmt.Fprintf(w, "Database\t%s\n", c.Database) - fmt.Fprintf(w, "Pretty\t%v\n", c.Pretty) - fmt.Fprintf(w, "Format\t%s\n", c.Format) - fmt.Fprintln(w) - w.Flush() -} - -func (c *CommandLine) help() { - fmt.Println(`Usage: - connect connect to another node - auth prompt for username and password - pretty toggle pretty print - use set current databases - format set the output format: json, csv, or column - settings output the current settings for the shell - exit quit the influx shell - - show databases show database names - show series show series information - show measurements show measurement information - show tag keys show tag key information - show tag values show tag value information - - a full list of influxql commands can be found at: - https://influxdb.com/docs/v0.9/query_language/spec.html -`) -} - -func (c *CommandLine) gopher() { - fmt.Println(` - .-::-::://:-::- .:/++/' - '://:-''/oo+//++o+/.://o- ./+: - .:-. '++- .o/ '+yydhy' o- - .:/. .h: :osoys .smMN- :/ - -/:.' s- /MMMymh. '/y/ s' - -+s:'''' d -mMMms// '-/o: - -/++/++/////:. o: '... s- :s. - :+-+s-' ':/' 's- /+ 'o: - '+-'o: /ydhsh. '//. '-o- o- - .y. o: .MMMdm+y ':+++:::/+:.' s: - .-h/ y- 'sdmds'h -+ydds:::-.' 'h. - .//-.d' o: '.' 'dsNMMMNh:.:++' :y - +y. 'd 's. .s:mddds: ++ o/ - 'N- odd 'o/. './o-s-' .---+++' o- - 'N' yNd .://:/:::::. -s -+/s/./s' 'o/' - so' .h '''' ////s: '+. .s +y' - os/-.y' 's' 'y::+ +d' - '.:o/ -+:-:.' 
so.---.' - o' 'd-.''/s' - .s' :y.''.y - -s mo:::' - :: yh - // '''' /M' - o+ .s///:/. 'N: - :+ /: -s' ho - 's- -/s/:+/.+h' +h - ys' ':' '-. -d - oh .h - /o .s - s. .h - -y .d - m/ -h - +d /o - 'N- y: - h: m. - s- -d - o- s+ - +- 'm' - s/ oo--. - y- /s ':+' - s' 'od--' .d: - -+ ':o: ':+-/+ - y- .:+- ' - //o- '.:+/. - .-:+/' ''-/+/. - ./:' ''.:o+/-' - .+o:/:/+-' ''.-+ooo/-' - o: -h///++////-. - /: .o/ - //+ 'y - ./sooy. - -`) -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main_test.go deleted file mode 100644 index eb58cb1e8..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx/main_test.go +++ /dev/null @@ -1,194 +0,0 @@ -package main_test - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/influxdb/influxdb/client" - main "github.com/influxdb/influxdb/cmd/influx" -) - -func TestParseCommand_CommandsExist(t *testing.T) { - t.Parallel() - c := main.CommandLine{} - tests := []struct { - cmd string - }{ - {cmd: "gopher"}, - {cmd: "connect"}, - {cmd: "help"}, - {cmd: "pretty"}, - {cmd: "use"}, - {cmd: ""}, // test that a blank command just returns - } - for _, test := range tests { - if !c.ParseCommand(test.cmd) { - t.Fatalf(`Command failed for %q.`, test.cmd) - } - } -} - -func TestParseCommand_TogglePretty(t *testing.T) { - t.Parallel() - c := main.CommandLine{} - if c.Pretty { - t.Fatalf(`Pretty should be false.`) - } - c.ParseCommand("pretty") - if !c.Pretty { - t.Fatalf(`Pretty should be true.`) - } - c.ParseCommand("pretty") - if c.Pretty { - t.Fatalf(`Pretty should be false.`) - } -} - -func TestParseCommand_Exit(t *testing.T) { - t.Parallel() - c := main.CommandLine{} - tests := []struct { - cmd string - }{ - {cmd: "exit"}, - {cmd: " exit"}, - {cmd: "exit "}, - {cmd: "Exit "}, - } - - for _, test := range tests { - if c.ParseCommand(test.cmd) { - t.Fatalf(`Command "exit" failed for %q.`, test.cmd) - } - } -} - -func TestParseCommand_Use(t *testing.T) { - t.Parallel() - c := main.CommandLine{} - tests := []struct { - cmd string - }{ - {cmd: "use db"}, - {cmd: " use db"}, - {cmd: "use db "}, - {cmd: "use db;"}, - {cmd: "use db; "}, - {cmd: "Use db"}, - } - - for _, test := range tests { - if !c.ParseCommand(test.cmd) { - t.Fatalf(`Command "use" failed for %q.`, test.cmd) - } - - if c.Database != "db" { - t.Fatalf(`Command "use" changed database to %q. Expected db`, c.Database) - } - } -} - -func TestParseCommand_Insert(t *testing.T) { - t.Parallel() - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var data client.Response - w.WriteHeader(http.StatusNoContent) - _ = json.NewEncoder(w).Encode(data) - })) - defer ts.Close() - - u, _ := url.Parse(ts.URL) - config := client.Config{URL: *u} - c, err := client.NewClient(config) - if err != nil { - t.Fatalf("unexpected error. 
expected %v, actual %v", nil, err) - } - m := main.CommandLine{Client: c} - - tests := []struct { - cmd string - }{ - {cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"}, - {cmd: " INSERT cpu,host=serverA,region=us-west value=1.0"}, - {cmd: "INSERT cpu,host=serverA,region=us-west value=1.0"}, - {cmd: "insert cpu,host=serverA,region=us-west value=1.0 "}, - {cmd: "insert"}, - {cmd: "Insert "}, - {cmd: "insert c"}, - {cmd: "insert int"}, - } - - for _, test := range tests { - if !m.ParseCommand(test.cmd) { - t.Fatalf(`Command "insert" failed for %q.`, test.cmd) - } - } -} - -func TestParseCommand_InsertInto(t *testing.T) { - t.Parallel() - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var data client.Response - w.WriteHeader(http.StatusNoContent) - _ = json.NewEncoder(w).Encode(data) - })) - defer ts.Close() - - u, _ := url.Parse(ts.URL) - config := client.Config{URL: *u} - c, err := client.NewClient(config) - if err != nil { - t.Fatalf("unexpected error. expected %v, actual %v", nil, err) - } - m := main.CommandLine{Client: c} - - tests := []struct { - cmd, db, rp string - }{ - { - cmd: `INSERT INTO test cpu,host=serverA,region=us-west value=1.0`, - db: "", - rp: "test", - }, - { - cmd: ` INSERT INTO .test cpu,host=serverA,region=us-west value=1.0`, - db: "", - rp: "test", - }, - { - cmd: `INSERT INTO "test test" cpu,host=serverA,region=us-west value=1.0`, - db: "", - rp: "test test", - }, - { - cmd: `Insert iNTO test.test cpu,host=serverA,region=us-west value=1.0`, - db: "test", - rp: "test", - }, - { - cmd: `insert into "test test" cpu,host=serverA,region=us-west value=1.0`, - db: "test", - rp: "test test", - }, - { - cmd: `insert into "d b"."test test" cpu,host=serverA,region=us-west value=1.0`, - db: "d b", - rp: "test test", - }, - } - - for _, test := range tests { - if !m.ParseCommand(test.cmd) { - t.Fatalf(`Command "insert into" failed for %q.`, test.cmd) - } - if m.Database != test.db { - t.Fatalf(`Command "insert into" db parsing failed, expected: %q, actual: %q`, test.db, m.Database) - } - if m.RetentionPolicy != test.rp { - t.Fatalf(`Command "insert into" rp parsing failed, expected: %q, actual: %q`, test.rp, m.RetentionPolicy) - } - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go deleted file mode 100644 index 7b9bc9873..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influx_stress/influx_stress.go +++ /dev/null @@ -1,154 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "math/rand" - "net/url" - "runtime" - "sort" - "sync" - "time" - - "github.com/influxdb/influxdb/client" -) - -var ( - batchSize = flag.Int("batchsize", 5000, "number of points per batch") - seriesCount = flag.Int("series", 100000, "number of unique series to create") - pointCount = flag.Int("points", 100, "number of points per series to create") - concurrency = flag.Int("concurrency", 10, "number of simultaneous writes to run") - batchInterval = flag.Duration("batchinterval", 0*time.Second, "duration between batches") - database = flag.String("database", "stress", "name of database") - address = flag.String("addr", "localhost:8086", "IP address and port of database (e.g., localhost:8086)") -) - -func main() { - flag.Parse() - runtime.GOMAXPROCS(runtime.NumCPU()) - - startTime := time.Now() - counter := NewConcurrencyLimiter(*concurrency) - - u, _ := url.Parse(fmt.Sprintf("http://%s", *address)) 
- c, err := client.NewClient(client.Config{URL: *u}) - if err != nil { - panic(err) - } - - var mu sync.Mutex - var wg sync.WaitGroup - responseTimes := make([]int, 0) - - totalPoints := 0 - - batch := &client.BatchPoints{ - Database: *database, - WriteConsistency: "any", - Time: time.Now(), - Precision: "n", - } - for i := 1; i <= *pointCount; i++ { - for j := 1; j <= *seriesCount; j++ { - p := client.Point{ - Measurement: "cpu", - Tags: map[string]string{"region": "uswest", "host": fmt.Sprintf("host-%d", j)}, - Fields: map[string]interface{}{"value": rand.Float64()}, - } - batch.Points = append(batch.Points, p) - if len(batch.Points) >= *batchSize { - wg.Add(1) - counter.Increment() - totalPoints += len(batch.Points) - go func(b *client.BatchPoints, total int) { - st := time.Now() - if _, err := c.Write(*b); err != nil { - fmt.Println("ERROR: ", err.Error()) - } else { - mu.Lock() - responseTimes = append(responseTimes, int(time.Since(st).Nanoseconds())) - mu.Unlock() - } - wg.Done() - counter.Decrement() - if total%500000 == 0 { - fmt.Printf("%d total points. %d in %s\n", total, *batchSize, time.Since(st)) - } - }(batch, totalPoints) - - batch = &client.BatchPoints{ - Database: *database, - WriteConsistency: "any", - Precision: "n", - Time: time.Now(), - } - } - } - } - - wg.Wait() - sort.Sort(sort.Reverse(sort.IntSlice(responseTimes))) - - total := int64(0) - for _, t := range responseTimes { - total += int64(t) - } - mean := total / int64(len(responseTimes)) - - fmt.Printf("Wrote %d points at average rate of %.0f\n", totalPoints, float64(totalPoints)/time.Since(startTime).Seconds()) - fmt.Println("Average response time: ", time.Duration(mean)) - fmt.Println("Slowest response times:") - for _, r := range responseTimes[:100] { - fmt.Println(time.Duration(r)) - } -} - -// ConcurrencyLimiter is a goroutine-safe struct that can be used to -// ensure that no more than a specified max number of goroutines are -// executing. -type ConcurrencyLimiter struct { - inc chan chan struct{} - dec chan struct{} - max int - count int -} - -// NewConcurrencyLimiter returns a configured limiter that will -// ensure that calls to Increment will block if the max is hit. -func NewConcurrencyLimiter(max int) *ConcurrencyLimiter { - c := &ConcurrencyLimiter{ - inc: make(chan chan struct{}), - dec: make(chan struct{}, max), - max: max, - } - go c.handleLimits() - return c -} - -// Increment will increase the count of running goroutines by 1. -// If the number is currently at the max, the call to Increment -// will block until another goroutine decrements. -func (c *ConcurrencyLimiter) Increment() { - r := make(chan struct{}) - c.inc <- r - <-r -} - -// Decrement will reduce the count of running goroutines by 1. -func (c *ConcurrencyLimiter) Decrement() { - c.dec <- struct{}{} -} - -// handleLimits runs in a goroutine to manage the count of -// running goroutines.
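Before handleLimits itself, a minimal usage sketch of the limiter, mirroring how main above brackets each batch write with Increment and Decrement. It assumes the ConcurrencyLimiter type from this file plus the sync and fmt imports; the numbers are arbitrary:

func exampleLimiter() {
    limiter := NewConcurrencyLimiter(3) // at most 3 workers in flight
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        limiter.Increment() // blocks once 3 workers are running
        go func(n int) {
            defer wg.Done()
            defer limiter.Decrement() // frees a slot for a blocked Increment
            fmt.Println("worker", n)
        }(i)
    }
    wg.Wait()
}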
-func (c *ConcurrencyLimiter) handleLimits() { - for { - r := <-c.inc - if c.count >= c.max { - <-c.dec - c.count-- - } - c.count++ - r <- struct{}{} - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup.go deleted file mode 100644 index c88652f75..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup.go +++ /dev/null @@ -1,170 +0,0 @@ -package backup - -import ( - "encoding/json" - "errors" - "flag" - "fmt" - "io" - "log" - "net" - "os" - - "github.com/influxdb/influxdb/services/snapshotter" - "github.com/influxdb/influxdb/snapshot" -) - -// Suffix is a suffix added to the backup while it's in-process. -const Suffix = ".pending" - -// Command represents the program execution for "influxd backup". -type Command struct { - // The logger passed to the ticker during execution. - Logger *log.Logger - - // Standard input/output, overridden for testing. - Stderr io.Writer -} - -// NewCommand returns a new instance of Command with default settings. -func NewCommand() *Command { - return &Command{ - Stderr: os.Stderr, - } -} - -// Run executes the program. -func (cmd *Command) Run(args ...string) error { - // Set up logger. - cmd.Logger = log.New(cmd.Stderr, "", log.LstdFlags) - cmd.Logger.Printf("influxdb backup") - - // Parse command line arguments. - host, path, err := cmd.parseFlags(args) - if err != nil { - return err - } - - // Retrieve snapshot from local file. - m, err := snapshot.ReadFileManifest(path) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("read file snapshot: %s", err) - } - - // Determine temporary path to download to. - tmppath := path + Suffix - - // Calculate path of next backup file. - // This uses the path if it doesn't exist. - // Otherwise it appends an autoincrementing number. - path, err = cmd.nextPath(path) - if err != nil { - return fmt.Errorf("next path: %s", err) - } - - // Retrieve snapshot. - if err := cmd.download(host, m, tmppath); err != nil { - return fmt.Errorf("download: %s", err) - } - - // Rename temporary file to final path. - if err := os.Rename(tmppath, path); err != nil { - return fmt.Errorf("rename: %s", err) - } - - // TODO: Check file integrity. - - // Notify user of completion. - cmd.Logger.Println("backup complete") - - return nil -} - -// parseFlags parses and validates the command line arguments. -func (cmd *Command) parseFlags(args []string) (host string, path string, err error) { - fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.StringVar(&host, "host", "localhost:8088", "") - fs.SetOutput(cmd.Stderr) - fs.Usage = cmd.printUsage - if err := fs.Parse(args); err != nil { - return "", "", err - } - - // Ensure that only one arg is specified. - if fs.NArg() == 0 { - return "", "", errors.New("snapshot path required") - } else if fs.NArg() != 1 { - return "", "", errors.New("only one snapshot path allowed") - } - path = fs.Arg(0) - - return host, path, nil -} - -// nextPath returns the next file to write to. -func (cmd *Command) nextPath(path string) (string, error) { - // Use base path if it doesn't exist. - if _, err := os.Stat(path); os.IsNotExist(err) { - return path, nil - } else if err != nil { - return "", err - } - - // Otherwise iterate through incremental files until one is available. 
- for i := 0; ; i++ { - s := fmt.Sprintf(path+".%d", i) - if _, err := os.Stat(s); os.IsNotExist(err) { - return s, nil - } else if err != nil { - return "", err - } - } -} - -// download downloads a snapshot from a host to a given path. -func (cmd *Command) download(host string, m *snapshot.Manifest, path string) error { - // Create local file to write to. - f, err := os.Create(path) - if err != nil { - return fmt.Errorf("open temp file: %s", err) - } - defer f.Close() - - // Connect to snapshotter service. - conn, err := net.Dial("tcp", host) - if err != nil { - return err - } - defer conn.Close() - - // Send snapshotter marker byte. - if _, err := conn.Write([]byte{snapshotter.MuxHeader}); err != nil { - return fmt.Errorf("write snapshot header byte: %s", err) - } - - // Write the manifest we currently have. - if err := json.NewEncoder(conn).Encode(m); err != nil { - return fmt.Errorf("encode snapshot manifest: %s", err) - } - - // Read snapshot from the connection. - if _, err := io.Copy(f, conn); err != nil { - return fmt.Errorf("copy snapshot to file: %s", err) - } - - // FIXME(benbjohnson): Verify integrity of snapshot. - - return nil -} - -// printUsage prints the usage message to STDERR. -func (cmd *Command) printUsage() { - fmt.Fprintf(cmd.Stderr, `usage: influxd backup [flags] PATH - -backup downloads a snapshot of a data node and saves it to disk. - - -host - The host to connect to snapshot. - Defaults to 127.0.0.1:8088. -`) -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup_test.go deleted file mode 100644 index 15db96449..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/backup/backup_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package backup_test - -/* -import ( - "bytes" - "net/http" - "net/http/httptest" - "os" - "strings" - "testing" - - "github.com/influxdb/influxdb" - "github.com/influxdb/influxdb/cmd/influxd" -) - -// Ensure the backup can download from the server and save to disk. -func TestBackupCommand(t *testing.T) { - // Mock the backup endpoint. - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path != "/data/snapshot" { - t.Fatalf("unexpected url path: %s", r.URL.Path) - } - - // Write a simple snapshot to the buffer. - sw := influxdb.NewSnapshotWriter() - sw.Snapshot = &influxdb.Snapshot{Files: []influxdb.SnapshotFile{ - {Name: "meta", Size: 5, Index: 10}, - }} - sw.FileWriters["meta"] = influxdb.NopWriteToCloser(bytes.NewBufferString("55555")) - if _, err := sw.WriteTo(w); err != nil { - t.Fatal(err) - } - })) - defer s.Close() - - // Create a temp path and remove incremental backups at the end. - path := tempfile() - defer os.Remove(path) - defer os.Remove(path + ".0") - defer os.Remove(path + ".1") - - // Execute the backup against the mock server. - for i := 0; i < 3; i++ { - if err := NewBackupCommand().Run("-host", s.URL, path); err != nil { - t.Fatal(err) - } - } - - // Verify snapshot and two incremental snapshots were written. - if _, err := os.Stat(path); err != nil { - t.Fatalf("snapshot not found: %s", err) - } else if _, err = os.Stat(path + ".0"); err != nil { - t.Fatalf("incremental snapshot(0) not found: %s", err) - } else if _, err = os.Stat(path + ".1"); err != nil { - t.Fatalf("incremental snapshot(1) not found: %s", err) - } -} - -// Ensure the backup command returns an error if flags cannot be parsed. 
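The download function above speaks a three-step protocol: one mux header byte, the client's current manifest as JSON, then the raw snapshot stream. A toy stand-in for the server side, as a hedged sketch only; the real service lives in services/snapshotter with its own manifest type, so a generic map is used here to stay self-contained:

package main

import (
    "encoding/json"
    "io"
    "log"
    "net"
)

func main() {
    ln, err := net.Listen("tcp", "127.0.0.1:8088") // the backup default port
    if err != nil {
        log.Fatal(err)
    }
    conn, err := ln.Accept()
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    // 1. One header byte selects the snapshotter service on the mux.
    header := make([]byte, 1)
    if _, err := io.ReadFull(conn, header); err != nil {
        log.Fatal(err)
    }

    // 2. The client sends the manifest it already has, as JSON.
    var manifest map[string]interface{}
    if err := json.NewDecoder(conn).Decode(&manifest); err != nil {
        log.Fatal(err)
    }

    // 3. Everything afterwards is the snapshot stream the client copies to disk.
    if _, err := conn.Write([]byte("snapshot bytes")); err != nil {
        log.Fatal(err)
    }
}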
-func TestBackupCommand_ErrFlagParse(t *testing.T) { - cmd := NewBackupCommand() - if err := cmd.Run("-bad-flag"); err == nil || err.Error() != `flag provided but not defined: -bad-flag` { - t.Fatal(err) - } else if !strings.Contains(cmd.Stderr.String(), "usage") { - t.Fatal("usage message not displayed") - } -} - -// Ensure the backup command returns an error if the host cannot be parsed. -func TestBackupCommand_ErrInvalidHostURL(t *testing.T) { - if err := NewBackupCommand().Run("-host", "http://%f"); err == nil || err.Error() != `parse host url: parse http://%f: hexadecimal escape in host` { - t.Fatal(err) - } -} - -// Ensure the backup command returns an error if the output path is not specified. -func TestBackupCommand_ErrPathRequired(t *testing.T) { - if err := NewBackupCommand().Run("-host", "//localhost"); err == nil || err.Error() != `snapshot path required` { - t.Fatal(err) - } -} - -// Ensure the backup returns an error if it cannot connect to the server. -func TestBackupCommand_ErrConnectionRefused(t *testing.T) { - // Start and immediately stop a server so we have a dead port. - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) - s.Close() - - // Execute the backup command. - path := tempfile() - defer os.Remove(path) - if err := NewBackupCommand().Run("-host", s.URL, path); err == nil || - !(strings.Contains(err.Error(), `connection refused`) || strings.Contains(err.Error(), `No connection could be made`)) { - t.Fatal(err) - } -} - -// Ensure the backup returns any non-200 status codes. -func TestBackupCommand_ErrServerError(t *testing.T) { - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - })) - defer s.Close() - - // Execute the backup command. - path := tempfile() - defer os.Remove(path) - if err := NewBackupCommand().Run("-host", s.URL, path); err == nil || err.Error() != `download: snapshot error: status=500` { - t.Fatal(err) - } -} - -// BackupCommand is a test wrapper for main.BackupCommand. -type BackupCommand struct { - *main.BackupCommand - Stderr bytes.Buffer -} - -// NewBackupCommand returns a new instance of BackupCommand. -func NewBackupCommand() *BackupCommand { - cmd := &BackupCommand{BackupCommand: main.NewBackupCommand()} - cmd.BackupCommand.Stderr = &cmd.Stderr - return cmd -} -*/ diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/help/help.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/help/help.go deleted file mode 100644 index 3f6bbfb08..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/help/help.go +++ /dev/null @@ -1,46 +0,0 @@ -package help - -import ( - "fmt" - "io" - "os" - "strings" -) - -// Command displays help for command-line sub-commands. -type Command struct { - Stdout io.Writer -} - -// NewCommand returns a new instance of Command. -func NewCommand() *Command { - return &Command{ - Stdout: os.Stdout, - } -} - -// Run executes the command. -func (cmd *Command) Run(args ...string) error { - fmt.Fprintln(cmd.Stdout, strings.TrimSpace(usage)) - return nil -} - -const usage = ` -Configure and start an InfluxDB server. 
- -Usage: - - influxd [[command] [arguments]] - -The commands are: - - backup downloads a snapshot of a data node and saves it to disk - config display the default configuration - restore uses a snapshot of a data node to rebuild a cluster - run run node with existing configuration - version displays the InfluxDB version - -"run" is the default command. - -Use "influxd help [command]" for more information about a command. -` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/main.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/main.go deleted file mode 100644 index 9748493a5..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/main.go +++ /dev/null @@ -1,200 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "io" - "log" - "math/rand" - "os" - "os/signal" - "strings" - "syscall" - "time" - - "github.com/influxdb/influxdb/cmd/influxd/backup" - "github.com/influxdb/influxdb/cmd/influxd/help" - "github.com/influxdb/influxdb/cmd/influxd/restore" - "github.com/influxdb/influxdb/cmd/influxd/run" -) - -// These variables are populated via the Go linker. -var ( - version string = "0.9" - commit string - branch string -) - -func init() { - // If commit or branch are not set, make that clear. - if commit == "" { - commit = "unknown" - } - if branch == "" { - branch = "unknown" - } -} - -func main() { - rand.Seed(time.Now().UnixNano()) - - m := NewMain() - if err := m.Run(os.Args[1:]...); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -// Main represents the program execution. -type Main struct { - Logger *log.Logger - - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewMain return a new instance of Main. -func NewMain() *Main { - return &Main{ - Logger: log.New(os.Stderr, "[run] ", log.LstdFlags), - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, - } -} - -// Run determines and runs the command specified by the CLI args. -func (m *Main) Run(args ...string) error { - name, args := ParseCommandName(args) - - // Extract name from args. - switch name { - case "", "run": - cmd := run.NewCommand() - - // Tell the server the build details. - cmd.Version = version - cmd.Commit = commit - cmd.Branch = branch - - if err := cmd.Run(args...); err != nil { - return fmt.Errorf("run: %s", err) - } - - signalCh := make(chan os.Signal, 1) - signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) - m.Logger.Println("Listening for signals") - - // Block until one of the signals above is received - select { - case <-signalCh: - m.Logger.Println("Signal received, initializing clean shutdown...") - go func() { - cmd.Close() - }() - } - - // Block again until another signal is received, a shutdown timeout elapses, - // or the Command is gracefully closed - m.Logger.Println("Waiting for clean shutdown...") - select { - case <-signalCh: - m.Logger.Println("second signal received, initializing hard shutdown") - case <-time.After(time.Second * 30): - m.Logger.Println("time limit reached, initializing hard shutdown") - case <-cmd.Closed: - m.Logger.Println("server shutdown completed") - } - - // goodbye. 
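The wait-twice shutdown above (a first signal starts the clean close, then a second signal, a 30-second timer, or the Closed channel decides how the process exits) is reusable on its own. A hedged standalone sketch of the same pattern, with a sleep standing in for cmd.Close():

package main

import (
    "log"
    "os"
    "os/signal"
    "syscall"
    "time"
)

func main() {
    closed := make(chan struct{})

    sig := make(chan os.Signal, 1)
    signal.Notify(sig, os.Interrupt, syscall.SIGTERM)

    <-sig // first signal: begin clean shutdown
    log.Println("signal received, initializing clean shutdown...")
    go func() {
        time.Sleep(2 * time.Second) // stand-in for a real teardown
        close(closed)
    }()

    select {
    case <-sig:
        log.Println("second signal received, initializing hard shutdown")
    case <-time.After(30 * time.Second):
        log.Println("time limit reached, initializing hard shutdown")
    case <-closed:
        log.Println("server shutdown completed")
    }
}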
- - case "backup": - name := backup.NewCommand() - if err := name.Run(args...); err != nil { - return fmt.Errorf("backup: %s", err) - } - case "restore": - name := restore.NewCommand() - if err := name.Run(args...); err != nil { - return fmt.Errorf("restore: %s", err) - } - case "config": - if err := run.NewPrintConfigCommand().Run(args...); err != nil { - return fmt.Errorf("config: %s", err) - } - case "version": - if err := NewVersionCommand().Run(args...); err != nil { - return fmt.Errorf("version: %s", err) - } - case "help": - if err := help.NewCommand().Run(args...); err != nil { - return fmt.Errorf("help: %s", err) - } - default: - return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'influxd help' for usage`+"\n\n", name) - } - - return nil -} - -// ParseCommandName extracts the command name and args from the args list. -func ParseCommandName(args []string) (string, []string) { - // Retrieve command name as first argument. - var name string - if len(args) > 0 && !strings.HasPrefix(args[0], "-") { - name = args[0] - } - - // Special case -h immediately following binary name - if len(args) > 0 && args[0] == "-h" { - name = "help" - } - - // If command is "help" and has an argument then rewrite args to use "-h". - if name == "help" && len(args) > 1 { - args[0], args[1] = args[1], "-h" - name = args[0] - } - - // If a named command is specified then return it with its arguments. - if name != "" { - return name, args[1:] - } - return "", args -} - -// Command represents the command executed by "influxd version". -type VersionCommand struct { - Stdout io.Writer - Stderr io.Writer -} - -// NewVersionCommand return a new instance of VersionCommand. -func NewVersionCommand() *VersionCommand { - return &VersionCommand{ - Stdout: os.Stdout, - Stderr: os.Stderr, - } -} - -// Run prints the current version and commit info. -func (cmd *VersionCommand) Run(args ...string) error { - // Parse flags in case -h is specified. - fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.Usage = func() { fmt.Fprintln(cmd.Stderr, strings.TrimSpace(versionUsage)) } - if err := fs.Parse(args); err != nil { - return err - } - - // Print version info. - fmt.Fprintf(cmd.Stdout, "InfluxDB v%s (git: %s %s)\n", version, branch, commit) - - return nil -} - -var versionUsage = ` -usage: version - - version displays the InfluxDB version, build branch and git commit hash -` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore.go deleted file mode 100644 index 5a95f8726..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore.go +++ /dev/null @@ -1,250 +0,0 @@ -package restore - -import ( - "bytes" - "errors" - "flag" - "fmt" - "io" - "net" - "os" - "path/filepath" - - "github.com/BurntSushi/toml" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/snapshot" - "github.com/influxdb/influxdb/tsdb" -) - -// Command represents the program execution for "influxd restore". -type Command struct { - Stdout io.Writer - Stderr io.Writer -} - -// NewCommand returns a new instance of Command with default settings. -func NewCommand() *Command { - return &Command{ - Stdout: os.Stdout, - Stderr: os.Stderr, - } -} - -// Run executes the program. 
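Run below starts by decoding a partial TOML config; only the [meta] and [data] sections matter to restore. A hedged sketch of that decode step with a throwaway struct (the path is illustrative, and the real code decodes into meta.Config and tsdb.Config):

package main

import (
    "fmt"
    "log"

    "github.com/BurntSushi/toml"
)

// Throwaway mirror of the restore Config: only the directories matter here.
type config struct {
    Meta struct {
        Dir string `toml:"dir"`
    } `toml:"meta"`
    Data struct {
        Dir string `toml:"dir"`
    } `toml:"data"`
}

func main() {
    var c config
    if _, err := toml.DecodeFile("/etc/influxdb/influxdb.conf", &c); err != nil {
        log.Fatal(err)
    }
    fmt.Println(c.Meta.Dir, c.Data.Dir)
}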
-func (cmd *Command) Run(args ...string) error { - config, path, err := cmd.parseFlags(args) - if err != nil { - return err - } - - return cmd.Restore(config, path) -} - -func (cmd *Command) Restore(config *Config, path string) error { - // Remove meta and data directories. - if err := os.RemoveAll(config.Meta.Dir); err != nil { - return fmt.Errorf("remove meta dir: %s", err) - } else if err := os.RemoveAll(config.Data.Dir); err != nil { - return fmt.Errorf("remove data dir: %s", err) - } - - // Open snapshot file and all incremental backups. - mr, files, err := snapshot.OpenFileMultiReader(path) - if err != nil { - return fmt.Errorf("open multireader: %s", err) - } - defer closeAll(files) - - // Unpack files from archive. - if err := cmd.unpack(mr, config); err != nil { - return fmt.Errorf("unpack: %s", err) - } - - // Notify user of completion. - fmt.Fprintf(os.Stdout, "restore complete using %s", path) - return nil -} - -// parseFlags parses and validates the command line arguments. -func (cmd *Command) parseFlags(args []string) (*Config, string, error) { - fs := flag.NewFlagSet("", flag.ContinueOnError) - configPath := fs.String("config", "", "") - fs.SetOutput(cmd.Stderr) - fs.Usage = cmd.printUsage - if err := fs.Parse(args); err != nil { - return nil, "", err - } - - // Parse configuration file from disk. - if *configPath == "" { - return nil, "", fmt.Errorf("config required") - } - - // Parse config. - config := Config{ - Meta: meta.NewConfig(), - Data: tsdb.NewConfig(), - } - if _, err := toml.DecodeFile(*configPath, &config); err != nil { - return nil, "", err - } - - // Require output path. - path := fs.Arg(0) - if path == "" { - return nil, "", fmt.Errorf("snapshot path required") - } - - return &config, path, nil -} - -func closeAll(a []io.Closer) { - for _, c := range a { - _ = c.Close() - } -} - -// unpack expands the files in the snapshot archive into a directory. -func (cmd *Command) unpack(mr *snapshot.MultiReader, config *Config) error { - // Loop over files and extract. - for { - // Read entry header. - sf, err := mr.Next() - if err == io.EOF { - break - } else if err != nil { - return fmt.Errorf("next: entry=%s, err=%s", sf.Name, err) - } - - // Log progress. - fmt.Fprintf(os.Stdout, "unpacking: %s (%d bytes)\n", sf.Name, sf.Size) - - // Handle meta and tsdb files separately. - switch sf.Name { - case "meta": - if err := cmd.unpackMeta(mr, sf, config); err != nil { - return fmt.Errorf("meta: %s", err) - } - default: - if err := cmd.unpackData(mr, sf, config); err != nil { - return fmt.Errorf("data: %s", err) - } - } - } - - return nil -} - -// unpackMeta reads the metadata from the snapshot and initializes a raft -// cluster and replaces the root metadata. -func (cmd *Command) unpackMeta(mr *snapshot.MultiReader, sf snapshot.File, config *Config) error { - // Read meta into buffer. - var buf bytes.Buffer - if _, err := io.CopyN(&buf, mr, sf.Size); err != nil { - return fmt.Errorf("copy: %s", err) - } - - // Unpack into metadata. - var data meta.Data - if err := data.UnmarshalBinary(buf.Bytes()); err != nil { - return fmt.Errorf("unmarshal: %s", err) - } - - // Copy meta config and remove peers so it starts in single mode. - c := config.Meta - c.Peers = nil - - // Initialize meta store. - store := meta.NewStore(config.Meta) - store.RaftListener = newNopListener() - store.ExecListener = newNopListener() - - // Determine advertised address. 
- _, port, err := net.SplitHostPort(config.Meta.BindAddress) - if err != nil { - return fmt.Errorf("split bind address: %s", err) - } - hostport := net.JoinHostPort(config.Meta.Hostname, port) - - // Resolve address. - addr, err := net.ResolveTCPAddr("tcp", hostport) - if err != nil { - return fmt.Errorf("resolve tcp: addr=%s, err=%s", hostport, err) - } - store.Addr = addr - - // Open the meta store. - if err := store.Open(); err != nil { - return fmt.Errorf("open store: %s", err) - } - defer store.Close() - - // Wait for the store to be ready or error. - select { - case <-store.Ready(): - case err := <-store.Err(): - return err - } - - // Force set the full metadata. - if err := store.SetData(&data); err != nil { - return fmt.Errorf("set data: %s", err) - } - - return nil -} - -func (cmd *Command) unpackData(mr *snapshot.MultiReader, sf snapshot.File, config *Config) error { - path := filepath.Join(config.Data.Dir, sf.Name) - // Create parent directory for output file. - if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { - return fmt.Errorf("mkdir: entry=%s, err=%s", sf.Name, err) - } - - // Create output file. - f, err := os.Create(path) - if err != nil { - return fmt.Errorf("create: entry=%s, err=%s", sf.Name, err) - } - defer f.Close() - - // Copy contents from reader. - if _, err := io.CopyN(f, mr, sf.Size); err != nil { - return fmt.Errorf("copy: entry=%s, err=%s", sf.Name, err) - } - - return nil -} - -// printUsage prints the usage message to STDERR. -func (cmd *Command) printUsage() { - fmt.Fprintf(cmd.Stderr, `usage: influxd restore [flags] PATH - -restore uses a snapshot of a data node to rebuild a cluster. - - -config - Set the path to the configuration file. -`) -} - -// Config represents a partial config for rebuilding the server. -type Config struct { - Meta *meta.Config `toml:"meta"` - Data tsdb.Config `toml:"data"` -} - -type nopListener struct { - closing chan struct{} -} - -func newNopListener() *nopListener { - return &nopListener{make(chan struct{})} -} - -func (ln *nopListener) Accept() (net.Conn, error) { - <-ln.closing - return nil, errors.New("listener closing") -} - -func (ln *nopListener) Close() error { close(ln.closing); return nil } -func (ln *nopListener) Addr() net.Addr { return nil } diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore_test.go deleted file mode 100644 index 6e3143f25..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/restore/restore_test.go +++ /dev/null @@ -1,155 +0,0 @@ -package restore_test - -/* -import ( - "bytes" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "testing" - "time" - - main "github.com/influxdb/influxdb/cmd/influxd" - "github.com/influxdb/influxdb/tsdb" -) - -func newConfig(path string, port int) main.Config { - config := main.NewConfig() - config.Port = port - config.Broker.Enabled = true - config.Broker.Dir = filepath.Join(path, "broker") - - config.Data.Enabled = true - config.Data.Dir = filepath.Join(path, "data") - return *config -} - -// Ensure the restore command can expand a snapshot and bootstrap a broker. -func TestRestoreCommand(t *testing.T) { - if testing.Short() { - t.Skip("skipping TestRestoreCommand") - } - - now := time.Now() - - // Create root path to server. - path := tempfile() - defer os.Remove(path) - - // Parse configuration. - config := newConfig(path, 8900) - - // Start server. 
- cmd := main.NewRunCommand() - node := cmd.Open(&config, "") - if node.Broker == nil { - t.Fatal("cannot run broker") - } else if node.DataNode == nil { - t.Fatal("cannot run server") - } - b := node.Broker - s := node.DataNode - - // Create data. - if err := s.CreateDatabase("db"); err != nil { - t.Fatalf("cannot create database: %s", err) - } - if index, err := s.WriteSeries("db", "default", []tsdb.Point{tsdb.NewPoint("cpu", nil, map[string]interface{}{"value": float64(100)}, now)}); err != nil { - t.Fatalf("cannot write series: %s", err) - } else if err = s.Sync(1, index); err != nil { - t.Fatalf("shard sync: %s", err) - } - - // Create snapshot writer. - sw, err := s.CreateSnapshotWriter() - if err != nil { - t.Fatalf("create snapshot writer: %s", err) - } - - // Snapshot to file. - sspath := tempfile() - f, err := os.Create(sspath) - if err != nil { - t.Fatal(err) - } - sw.WriteTo(f) - f.Close() - - // Stop server. - node.Close() - - // Remove data & broker directories. - if err := os.RemoveAll(path); err != nil { - t.Fatalf("remove: %s", err) - } - - // Execute the restore. - if err := NewRestoreCommand().Restore(&config, sspath); err != nil { - t.Fatal(err) - } - - // Rewrite config to a new port and re-parse. - config = newConfig(path, 8910) - - // Restart server. - cmd = main.NewRunCommand() - node = cmd.Open(&config, "") - if b == nil { - t.Fatal("cannot run broker") - } else if s == nil { - t.Fatal("cannot run server") - } - b = node.Broker - s = node.DataNode - - // Write new data. - if err := s.CreateDatabase("newdb"); err != nil { - t.Fatalf("cannot create new database: %s", err) - } - if index, err := s.WriteSeries("newdb", "default", []tsdb.Point{tsdb.NewPoint("mem", nil, map[string]interface{}{"value": float64(1000)}, now)}); err != nil { - t.Fatalf("cannot write new series: %s", err) - } else if err = s.Sync(2, index); err != nil { - t.Fatalf("shard sync: %s", err) - } - - // Read series data. - if v, err := s.ReadSeries("db", "default", "cpu", nil, now); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(v, map[string]interface{}{"value": float64(100)}) { - t.Fatalf("read series(0) mismatch: %#v", v) - } - - // Read new series data. - if v, err := s.ReadSeries("newdb", "default", "mem", nil, now); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(v, map[string]interface{}{"value": float64(1000)}) { - t.Fatalf("read series(1) mismatch: %#v", v) - } - - // Stop server. - node.Close() -} - -// RestoreCommand is a test wrapper for main.RestoreCommand. -type RestoreCommand struct { - *main.RestoreCommand - Stderr bytes.Buffer -} - -// NewRestoreCommand returns a new instance of RestoreCommand. -func NewRestoreCommand() *RestoreCommand { - cmd := &RestoreCommand{RestoreCommand: main.NewRestoreCommand()} - cmd.RestoreCommand.Stderr = &cmd.Stderr - return cmd -} - -// MustReadFile reads data from a file. Panic on error. 
-func MustReadFile(filename string) []byte { - b, err := ioutil.ReadFile(filename) - if err != nil { - panic(err.Error()) - } - return b -} -*/ diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/command.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/command.go deleted file mode 100644 index 629fd3eec..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/command.go +++ /dev/null @@ -1,235 +0,0 @@ -package run - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "time" - - "github.com/BurntSushi/toml" -) - -const logo = ` - 8888888 .d888 888 8888888b. 888888b. - 888 d88P" 888 888 "Y88b 888 "88b - 888 888 888 888 888 888 .88P - 888 88888b. 888888 888 888 888 888 888 888 888 8888888K. - 888 888 "88b 888 888 888 888 Y8bd8P' 888 888 888 "Y88b - 888 888 888 888 888 888 888 X88K 888 888 888 888 - 888 888 888 888 888 Y88b 888 .d8""8b. 888 .d88P 888 d88P - 8888888 888 888 888 888 "Y88888 888 888 8888888P" 8888888P" - -` - -// Command represents the command executed by "influxd run". -type Command struct { - Version string - Branch string - Commit string - - closing chan struct{} - Closed chan struct{} - - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer - - Server *Server -} - -// NewCommand return a new instance of Command. -func NewCommand() *Command { - return &Command{ - closing: make(chan struct{}), - Closed: make(chan struct{}), - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, - } -} - -// Run parses the config from args and runs the server. -func (cmd *Command) Run(args ...string) error { - // Parse the command line flags. - options, err := cmd.ParseFlags(args...) - if err != nil { - return err - } - - // Print sweet InfluxDB logo. - fmt.Print(logo) - - // Write the PID file. - if err := cmd.writePIDFile(options.PIDFile); err != nil { - return fmt.Errorf("write pid file: %s", err) - } - - // Set parallelism. - runtime.GOMAXPROCS(runtime.NumCPU()) - - // Turn on block profiling to debug stuck databases - runtime.SetBlockProfileRate(int(1 * time.Second)) - - // Parse config - config, err := cmd.ParseConfig(options.ConfigPath) - if err != nil { - return fmt.Errorf("parse config: %s", err) - } - - // Apply any environment variables on top of the parsed config - if err := config.ApplyEnvOverrides(); err != nil { - return fmt.Errorf("apply env config: %v", err) - } - - // Override config hostname if specified in the command line args. - if options.Hostname != "" { - config.Meta.Hostname = options.Hostname - } - - if options.Join != "" { - config.Meta.Peers = strings.Split(options.Join, ",") - } - - // Validate the configuration. - if err := config.Validate(); err != nil { - return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`.", err) - } - - // Create server from config and start it. - s, err := NewServer(config, cmd.Version) - if err != nil { - return fmt.Errorf("create server: %s", err) - } - s.CPUProfile = options.CPUProfile - s.MemProfile = options.MemProfile - if err := s.Open(); err != nil { - return fmt.Errorf("open server: %s", err) - } - cmd.Server = s - - // Mark start-up in log. - log.Printf("InfluxDB starting, version %s, branch %s, commit %s", cmd.Version, cmd.Branch, cmd.Commit) - log.Println("GOMAXPROCS set to", runtime.GOMAXPROCS(0)) - - // Begin monitoring the server's error channel. 
- go cmd.monitorServerErrors() - - return nil -} - -// Close shuts down the server. -func (cmd *Command) Close() error { - defer close(cmd.Closed) - close(cmd.closing) - if cmd.Server != nil { - return cmd.Server.Close() - } - return nil -} - -func (cmd *Command) monitorServerErrors() { - logger := log.New(cmd.Stderr, "", log.LstdFlags) - for { - select { - case err := <-cmd.Server.Err(): - logger.Println(err) - case <-cmd.closing: - return - } - } -} - -// ParseFlags parses the command line flags from args and returns an options set. -func (cmd *Command) ParseFlags(args ...string) (Options, error) { - var options Options - fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.StringVar(&options.ConfigPath, "config", "", "") - fs.StringVar(&options.PIDFile, "pidfile", "", "") - fs.StringVar(&options.Hostname, "hostname", "", "") - fs.StringVar(&options.Join, "join", "", "") - fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") - fs.StringVar(&options.MemProfile, "memprofile", "", "") - fs.Usage = func() { fmt.Fprintln(cmd.Stderr, usage) } - if err := fs.Parse(args); err != nil { - return Options{}, err - } - return options, nil -} - -// writePIDFile writes the process ID to path. -func (cmd *Command) writePIDFile(path string) error { - // Ignore if path is not set. - if path == "" { - return nil - } - - // Ensure the required directory structure exists. - err := os.MkdirAll(filepath.Dir(path), 0777) - if err != nil { - return fmt.Errorf("mkdir: %s", err) - } - - // Retrieve the PID and write it. - pid := strconv.Itoa(os.Getpid()) - if err := ioutil.WriteFile(path, []byte(pid), 0666); err != nil { - return fmt.Errorf("write file: %s", err) - } - - return nil -} - -// ParseConfig parses the config at path. -// Returns a demo configuration if path is blank. -func (cmd *Command) ParseConfig(path string) (*Config, error) { - // Use demo configuration if no config path is specified. - if path == "" { - fmt.Fprintln(cmd.Stdout, "no configuration provided, using default settings") - return NewDemoConfig() - } - - fmt.Fprintf(cmd.Stdout, "Using configuration at: %s\n", path) - - config := NewConfig() - if _, err := toml.DecodeFile(path, &config); err != nil { - return nil, err - } - - return config, nil -} - -var usage = `usage: run [flags] - -run starts the broker and data node server. If this is the first time running -the command then a new cluster will be initialized unless the -join argument -is used. - - -config - Set the path to the configuration file. - - -hostname - Override the hostname, the 'hostname' configuration - option will be overridden. - - -join - Joins the server to an existing cluster. - - -pidfile - Write process ID to a file. -` - -// Options represents the command line options that can be parsed. 
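A short usage sketch for ParseFlags, which fills the Options struct defined next. It assumes the Command type from this file, plus the fmt and os imports; the paths and hosts are illustrative:

func exampleParseFlags() {
    cmd := NewCommand()
    opts, err := cmd.ParseFlags(
        "-config", "/etc/influxdb/influxdb.conf",
        "-pidfile", "/var/run/influxd.pid",
        "-join", "host1:8088,host2:8088",
    )
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    fmt.Printf("%+v\n", opts)
}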
-type Options struct { - ConfigPath string - PIDFile string - Hostname string - Join string - CPUProfile string - MemProfile string -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config.go deleted file mode 100644 index 77515ca60..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config.go +++ /dev/null @@ -1,227 +0,0 @@ -package run - -import ( - "errors" - "fmt" - "os" - "os/user" - "path/filepath" - "reflect" - "strconv" - "strings" - "time" - - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/services/admin" - "github.com/influxdb/influxdb/services/collectd" - "github.com/influxdb/influxdb/services/continuous_querier" - "github.com/influxdb/influxdb/services/graphite" - "github.com/influxdb/influxdb/services/hh" - "github.com/influxdb/influxdb/services/httpd" - "github.com/influxdb/influxdb/services/monitor" - "github.com/influxdb/influxdb/services/opentsdb" - "github.com/influxdb/influxdb/services/precreator" - "github.com/influxdb/influxdb/services/retention" - "github.com/influxdb/influxdb/services/udp" - "github.com/influxdb/influxdb/tsdb" -) - -// Config represents the configuration format for the influxd binary. -type Config struct { - Meta *meta.Config `toml:"meta"` - Data tsdb.Config `toml:"data"` - Cluster cluster.Config `toml:"cluster"` - Retention retention.Config `toml:"retention"` - Precreator precreator.Config `toml:"shard-precreation"` - - Admin admin.Config `toml:"admin"` - HTTPD httpd.Config `toml:"http"` - Graphites []graphite.Config `toml:"graphite"` - Collectd collectd.Config `toml:"collectd"` - OpenTSDB opentsdb.Config `toml:"opentsdb"` - UDPs []udp.Config `toml:"udp"` - - // Snapshot SnapshotConfig `toml:"snapshot"` - Monitoring monitor.Config `toml:"monitoring"` - ContinuousQuery continuous_querier.Config `toml:"continuous_queries"` - - HintedHandoff hh.Config `toml:"hinted-handoff"` - - // Server reporting - ReportingDisabled bool `toml:"reporting-disabled"` -} - -// NewConfig returns an instance of Config with reasonable defaults. -func NewConfig() *Config { - c := &Config{} - c.Meta = meta.NewConfig() - c.Data = tsdb.NewConfig() - c.Cluster = cluster.NewConfig() - c.Precreator = precreator.NewConfig() - - c.Admin = admin.NewConfig() - c.HTTPD = httpd.NewConfig() - c.Collectd = collectd.NewConfig() - c.OpenTSDB = opentsdb.NewConfig() - c.Graphites = append(c.Graphites, graphite.NewConfig()) - - c.Monitoring = monitor.NewConfig() - c.ContinuousQuery = continuous_querier.NewConfig() - c.Retention = retention.NewConfig() - c.HintedHandoff = hh.NewConfig() - - return c -} - -// NewDemoConfig returns the config that runs when no config is specified. 
-func NewDemoConfig() (*Config, error) {
-	c := NewConfig()
-
-	var homeDir string
-	// By default, store meta and data files in the current user's home directory
-	u, err := user.Current()
-	if err == nil {
-		homeDir = u.HomeDir
-	} else if os.Getenv("HOME") != "" {
-		homeDir = os.Getenv("HOME")
-	} else {
-		return nil, fmt.Errorf("failed to determine current user for storage")
-	}
-
-	c.Meta.Dir = filepath.Join(homeDir, ".influxdb/meta")
-	c.Data.Dir = filepath.Join(homeDir, ".influxdb/data")
-	c.HintedHandoff.Dir = filepath.Join(homeDir, ".influxdb/hh")
-	c.Data.WALDir = filepath.Join(homeDir, ".influxdb/wal")
-
-	c.Admin.Enabled = true
-	c.Monitoring.Enabled = false
-
-	return c, nil
-}
-
-// Validate returns an error if the config is invalid.
-func (c *Config) Validate() error {
-	if c.Meta.Dir == "" {
-		return errors.New("Meta.Dir must be specified")
-	} else if c.Data.Dir == "" {
-		return errors.New("Data.Dir must be specified")
-	} else if c.HintedHandoff.Dir == "" {
-		return errors.New("HintedHandoff.Dir must be specified")
-	} else if c.Data.WALDir == "" {
-		return errors.New("Data.WALDir must be specified")
-	}
-
-	for _, g := range c.Graphites {
-		if err := g.Validate(); err != nil {
-			return fmt.Errorf("invalid graphite config: %v", err)
-		}
-	}
-	return nil
-}
-
-func (c *Config) ApplyEnvOverrides() error {
-	return c.applyEnvOverrides("INFLUXDB", reflect.ValueOf(c))
-}
-
-func (c *Config) applyEnvOverrides(prefix string, spec reflect.Value) error {
-	// If we have a pointer, dereference it
-	s := spec
-	if spec.Kind() == reflect.Ptr {
-		s = spec.Elem()
-	}
-
-	// Make sure we have a struct
-	if s.Kind() != reflect.Struct {
-		return nil
-	}
-
-	typeOfSpec := s.Type()
-	for i := 0; i < s.NumField(); i++ {
-		f := s.Field(i)
-		// Get the toml tag to determine what env var name to use
-		configName := typeOfSpec.Field(i).Tag.Get("toml")
-		// Replace hyphens with underscores to avoid issues with shells
-		configName = strings.Replace(configName, "-", "_", -1)
-		fieldName := typeOfSpec.Field(i).Name
-
-		// Skip any fields that we cannot set
-		if f.CanSet() || f.Kind() == reflect.Slice {
-
-			// Use the upper-case prefix and toml name for the env var
-			key := strings.ToUpper(configName)
-			if prefix != "" {
-				key = strings.ToUpper(fmt.Sprintf("%s_%s", prefix, configName))
-			}
-			value := os.Getenv(key)
-
-			// If the type is a slice, apply to each using the index as a suffix,
-			// e.g. GRAPHITE_0
-			if f.Kind() == reflect.Slice || f.Kind() == reflect.Array {
-				for i := 0; i < f.Len(); i++ {
-					if err := c.applyEnvOverrides(fmt.Sprintf("%s_%d", key, i), f.Index(i)); err != nil {
-						return err
-					}
-				}
-				continue
-			}
-
-			// If it's a sub-config, recursively apply
-			if f.Kind() == reflect.Struct || f.Kind() == reflect.Ptr {
-				if err := c.applyEnvOverrides(key, f); err != nil {
-					return err
-				}
-				continue
-			}
-
-			// Skip any fields we don't have a value to set
-			if value == "" {
-				continue
-			}
-
-			switch f.Kind() {
-			case reflect.String:
-				f.SetString(value)
-			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-
-				var intValue int64
-
-				// Handle toml.Duration
-				if f.Type().Name() == "Duration" {
-					dur, err := time.ParseDuration(value)
-					if err != nil {
-						return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value)
-					}
-					intValue = dur.Nanoseconds()
-				} else {
-					var err error
-					intValue, err = strconv.ParseInt(value, 0, f.Type().Bits())
-					if err != nil {
-						return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value)
-					}
-				}
-
-				f.SetInt(intValue)
-			case reflect.Bool:
-				boolValue, err := strconv.ParseBool(value)
-				if err != nil {
-					return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value)
-				}
-				f.SetBool(boolValue)
-			case reflect.Float32, reflect.Float64:
-				floatValue, err := strconv.ParseFloat(value, f.Type().Bits())
-				if err != nil {
-					return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", key, fieldName, f.Type().String(), value)
-				}
-				f.SetFloat(floatValue)
-			default:
-				if err := c.applyEnvOverrides(key, f); err != nil {
-					return err
-				}
-			}
-		}
-	}
-	return nil
-}
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go
deleted file mode 100644
index 8135aa014..000000000
--- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_command.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package run
-
-import (
-	"flag"
-	"fmt"
-	"io"
-	"os"
-
-	"github.com/BurntSushi/toml"
-)
-
-// PrintConfigCommand represents the command executed by "influxd config".
-type PrintConfigCommand struct {
-	Stdin  io.Reader
-	Stdout io.Writer
-	Stderr io.Writer
-}
-
-// NewPrintConfigCommand returns a new instance of PrintConfigCommand.
-func NewPrintConfigCommand() *PrintConfigCommand {
-	return &PrintConfigCommand{
-		Stdin:  os.Stdin,
-		Stdout: os.Stdout,
-		Stderr: os.Stderr,
-	}
-}
-
-// Run parses and prints the currently loaded config.
-func (cmd *PrintConfigCommand) Run(args ...string) error {
-	// Parse command flags.
-	fs := flag.NewFlagSet("", flag.ContinueOnError)
-	configPath := fs.String("config", "", "")
-	hostname := fs.String("hostname", "", "")
-	fs.Usage = func() { fmt.Fprintln(cmd.Stderr, printConfigUsage) }
-	if err := fs.Parse(args); err != nil {
-		return err
-	}
-
-	// Parse config from path.
-	config, err := cmd.parseConfig(*configPath)
-	if err != nil {
-		return fmt.Errorf("parse config: %s", err)
-	}
-
-	// Override config properties.
-	if *hostname != "" {
-		config.Meta.Hostname = *hostname
-	}
-
-	toml.NewEncoder(cmd.Stdout).Encode(config)
-	fmt.Fprint(cmd.Stdout, "\n")
-
-	return nil
-}
-
-// parseConfig parses the config at path.
-// Returns a demo configuration if path is blank.
-func (cmd *PrintConfigCommand) parseConfig(path string) (*Config, error) { - if path == "" { - return NewDemoConfig() - } - - config := NewConfig() - if _, err := toml.DecodeFile(path, &config); err != nil { - return nil, err - } - return config, nil -} - -var printConfigUsage = `usage: config - - config displays the default configuration -` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go deleted file mode 100644 index ffcaf2533..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/config_test.go +++ /dev/null @@ -1,144 +0,0 @@ -package run_test - -import ( - "os" - "testing" - - "github.com/BurntSushi/toml" - "github.com/influxdb/influxdb/cmd/influxd/run" -) - -// Ensure the configuration can be parsed. -func TestConfig_Parse(t *testing.T) { - // Parse configuration. - var c run.Config - if _, err := toml.Decode(` -[meta] -dir = "/tmp/meta" - -[data] -dir = "/tmp/data" - -[cluster] - -[admin] -bind-address = ":8083" - -[http] -bind-address = ":8087" - -[[graphite]] -protocol = "udp" - -[[graphite]] -protocol = "tcp" - -[collectd] -bind-address = ":1000" - -[opentsdb] -bind-address = ":2000" - -[[udp]] -bind-address = ":4444" - -[monitoring] -enabled = true - -[continuous_queries] -enabled = true -`, &c); err != nil { - t.Fatal(err) - } - - // Validate configuration. - if c.Meta.Dir != "/tmp/meta" { - t.Fatalf("unexpected meta dir: %s", c.Meta.Dir) - } else if c.Data.Dir != "/tmp/data" { - t.Fatalf("unexpected data dir: %s", c.Data.Dir) - } else if c.Admin.BindAddress != ":8083" { - t.Fatalf("unexpected admin bind address: %s", c.Admin.BindAddress) - } else if c.HTTPD.BindAddress != ":8087" { - t.Fatalf("unexpected api bind address: %s", c.HTTPD.BindAddress) - } else if len(c.Graphites) != 2 { - t.Fatalf("unexpected graphites count: %d", len(c.Graphites)) - } else if c.Graphites[0].Protocol != "udp" { - t.Fatalf("unexpected graphite protocol(0): %s", c.Graphites[0].Protocol) - } else if c.Graphites[1].Protocol != "tcp" { - t.Fatalf("unexpected graphite protocol(1): %s", c.Graphites[1].Protocol) - } else if c.Collectd.BindAddress != ":1000" { - t.Fatalf("unexpected collectd bind address: %s", c.Collectd.BindAddress) - } else if c.OpenTSDB.BindAddress != ":2000" { - t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDB.BindAddress) - } else if c.UDPs[0].BindAddress != ":4444" { - t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress) - } else if c.Monitoring.Enabled != true { - t.Fatalf("unexpected monitoring enabled: %v", c.Monitoring.Enabled) - } else if c.ContinuousQuery.Enabled != true { - t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled) - } -} - -// Ensure the configuration can be parsed. -func TestConfig_Parse_EnvOverride(t *testing.T) { - // Parse configuration. 
-	var c run.Config
-	if _, err := toml.Decode(`
-[meta]
-dir = "/tmp/meta"
-
-[data]
-dir = "/tmp/data"
-
-[cluster]
-
-[admin]
-bind-address = ":8083"
-
-[http]
-bind-address = ":8087"
-
-[[graphite]]
-protocol = "udp"
-
-[[graphite]]
-protocol = "tcp"
-
-[collectd]
-bind-address = ":1000"
-
-[opentsdb]
-bind-address = ":2000"
-
-[[udp]]
-bind-address = ":4444"
-
-[monitoring]
-enabled = true
-
-[continuous_queries]
-enabled = true
-`, &c); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := os.Setenv("INFLUXDB_UDP_BIND_ADDRESS", ":1234"); err != nil {
-		t.Fatalf("failed to set env var: %v", err)
-	}
-
-	if err := os.Setenv("INFLUXDB_GRAPHITE_1_PROTOCOL", "udp"); err != nil {
-		t.Fatalf("failed to set env var: %v", err)
-	}
-
-	if err := c.ApplyEnvOverrides(); err != nil {
-		t.Fatalf("failed to apply env overrides: %v", err)
-	}
-
-	if c.UDPs[0].BindAddress != ":4444" {
-		t.Fatalf("unexpected udp bind address: %s", c.UDPs[0].BindAddress)
-	}
-
-	if c.Graphites[1].Protocol != "udp" {
-		t.Fatalf("unexpected graphite protocol(1): %s", c.Graphites[1].Protocol)
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server.go
deleted file mode 100644
index ab65817d7..000000000
--- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server.go
+++ /dev/null
@@ -1,536 +0,0 @@
-package run
-
-import (
-	"bytes"
-	"fmt"
-	"log"
-	"net"
-	"net/http"
-	"os"
-	"runtime"
-	"runtime/pprof"
-	"strings"
-	"time"
-
-	"github.com/influxdb/influxdb/cluster"
-	"github.com/influxdb/influxdb/meta"
-	"github.com/influxdb/influxdb/services/admin"
-	"github.com/influxdb/influxdb/services/collectd"
-	"github.com/influxdb/influxdb/services/continuous_querier"
-	"github.com/influxdb/influxdb/services/graphite"
-	"github.com/influxdb/influxdb/services/hh"
-	"github.com/influxdb/influxdb/services/httpd"
-	"github.com/influxdb/influxdb/services/opentsdb"
-	"github.com/influxdb/influxdb/services/precreator"
-	"github.com/influxdb/influxdb/services/retention"
-	"github.com/influxdb/influxdb/services/snapshotter"
-	"github.com/influxdb/influxdb/services/udp"
-	"github.com/influxdb/influxdb/tcp"
-	"github.com/influxdb/influxdb/tsdb"
-	_ "github.com/influxdb/influxdb/tsdb/engine"
-)
-
-// Server represents a container for the metadata and storage data and services.
-// It is built using a Config and it manages the startup and shutdown of all
-// services in the proper order.
-type Server struct {
-	version string // Build version
-
-	err     chan error
-	closing chan struct{}
-
-	Hostname    string
-	BindAddress string
-	Listener    net.Listener
-
-	MetaStore     *meta.Store
-	TSDBStore     *tsdb.Store
-	QueryExecutor *tsdb.QueryExecutor
-	PointsWriter  *cluster.PointsWriter
-	ShardWriter   *cluster.ShardWriter
-	ShardMapper   *cluster.ShardMapper
-	HintedHandoff *hh.Service
-
-	Services []Service
-
-	// These references are required for the tcp muxer.
-	ClusterService     *cluster.Service
-	SnapshotterService *snapshotter.Service
-
-	// Server reporting
-	reportingDisabled bool
-
-	// Profiling
-	CPUProfile string
-	MemProfile string
-}
-
-// NewServer returns a new instance of Server built from a config.
-func NewServer(c *Config, version string) (*Server, error) {
-	// Construct base meta store and data store.
- tsdbStore := tsdb.NewStore(c.Data.Dir) - tsdbStore.EngineOptions.Config = c.Data - - s := &Server{ - version: version, - err: make(chan error), - closing: make(chan struct{}), - - Hostname: c.Meta.Hostname, - BindAddress: c.Meta.BindAddress, - - MetaStore: meta.NewStore(c.Meta), - TSDBStore: tsdbStore, - - reportingDisabled: c.ReportingDisabled, - } - - // Copy TSDB configuration. - s.TSDBStore.EngineOptions.MaxWALSize = c.Data.MaxWALSize - s.TSDBStore.EngineOptions.WALFlushInterval = time.Duration(c.Data.WALFlushInterval) - s.TSDBStore.EngineOptions.WALPartitionFlushDelay = time.Duration(c.Data.WALPartitionFlushDelay) - - // Set the shard mapper - s.ShardMapper = cluster.NewShardMapper(time.Duration(c.Cluster.ShardMapperTimeout)) - s.ShardMapper.ForceRemoteMapping = c.Cluster.ForceRemoteShardMapping - s.ShardMapper.MetaStore = s.MetaStore - s.ShardMapper.TSDBStore = s.TSDBStore - - // Initialize query executor. - s.QueryExecutor = tsdb.NewQueryExecutor(s.TSDBStore) - s.QueryExecutor.MetaStore = s.MetaStore - s.QueryExecutor.MetaStatementExecutor = &meta.StatementExecutor{Store: s.MetaStore} - s.QueryExecutor.ShardMapper = s.ShardMapper - - // Set the shard writer - s.ShardWriter = cluster.NewShardWriter(time.Duration(c.Cluster.ShardWriterTimeout)) - s.ShardWriter.MetaStore = s.MetaStore - - // Create the hinted handoff service - s.HintedHandoff = hh.NewService(c.HintedHandoff, s.ShardWriter) - - // Initialize points writer. - s.PointsWriter = cluster.NewPointsWriter() - s.PointsWriter.WriteTimeout = time.Duration(c.Cluster.WriteTimeout) - s.PointsWriter.MetaStore = s.MetaStore - s.PointsWriter.TSDBStore = s.TSDBStore - s.PointsWriter.ShardWriter = s.ShardWriter - s.PointsWriter.HintedHandoff = s.HintedHandoff - - // Append services. - s.appendClusterService(c.Cluster) - s.appendPrecreatorService(c.Precreator) - s.appendSnapshotterService() - s.appendAdminService(c.Admin) - s.appendContinuousQueryService(c.ContinuousQuery) - s.appendHTTPDService(c.HTTPD) - s.appendCollectdService(c.Collectd) - if err := s.appendOpenTSDBService(c.OpenTSDB); err != nil { - return nil, err - } - for _, g := range c.UDPs { - s.appendUDPService(g) - } - s.appendRetentionPolicyService(c.Retention) - for _, g := range c.Graphites { - if err := s.appendGraphiteService(g); err != nil { - return nil, err - } - } - - return s, nil -} - -func (s *Server) appendClusterService(c cluster.Config) { - srv := cluster.NewService(c) - srv.TSDBStore = s.TSDBStore - srv.MetaStore = s.MetaStore - s.Services = append(s.Services, srv) - s.ClusterService = srv -} - -func (s *Server) appendSnapshotterService() { - srv := snapshotter.NewService() - srv.TSDBStore = s.TSDBStore - srv.MetaStore = s.MetaStore - s.Services = append(s.Services, srv) - s.SnapshotterService = srv -} - -func (s *Server) appendRetentionPolicyService(c retention.Config) { - if !c.Enabled { - return - } - srv := retention.NewService(c) - srv.MetaStore = s.MetaStore - srv.TSDBStore = s.TSDBStore - s.Services = append(s.Services, srv) -} - -func (s *Server) appendAdminService(c admin.Config) { - if !c.Enabled { - return - } - srv := admin.NewService(c) - s.Services = append(s.Services, srv) -} - -func (s *Server) appendHTTPDService(c httpd.Config) { - if !c.Enabled { - return - } - srv := httpd.NewService(c) - srv.Handler.MetaStore = s.MetaStore - srv.Handler.QueryExecutor = s.QueryExecutor - srv.Handler.PointsWriter = s.PointsWriter - srv.Handler.Version = s.version - - // If a ContinuousQuerier service has been started, attach it. 
- for _, srvc := range s.Services { - if cqsrvc, ok := srvc.(continuous_querier.ContinuousQuerier); ok { - srv.Handler.ContinuousQuerier = cqsrvc - } - } - - s.Services = append(s.Services, srv) -} - -func (s *Server) appendCollectdService(c collectd.Config) { - if !c.Enabled { - return - } - srv := collectd.NewService(c) - srv.MetaStore = s.MetaStore - srv.PointsWriter = s.PointsWriter - s.Services = append(s.Services, srv) -} - -func (s *Server) appendOpenTSDBService(c opentsdb.Config) error { - if !c.Enabled { - return nil - } - srv, err := opentsdb.NewService(c) - if err != nil { - return err - } - srv.PointsWriter = s.PointsWriter - srv.MetaStore = s.MetaStore - s.Services = append(s.Services, srv) - return nil -} - -func (s *Server) appendGraphiteService(c graphite.Config) error { - if !c.Enabled { - return nil - } - srv, err := graphite.NewService(c) - if err != nil { - return err - } - - srv.PointsWriter = s.PointsWriter - srv.MetaStore = s.MetaStore - s.Services = append(s.Services, srv) - return nil -} - -func (s *Server) appendPrecreatorService(c precreator.Config) error { - if !c.Enabled { - return nil - } - srv, err := precreator.NewService(c) - if err != nil { - return err - } - - srv.MetaStore = s.MetaStore - s.Services = append(s.Services, srv) - return nil -} - -func (s *Server) appendUDPService(c udp.Config) { - if !c.Enabled { - return - } - srv := udp.NewService(c) - srv.PointsWriter = s.PointsWriter - s.Services = append(s.Services, srv) -} - -func (s *Server) appendContinuousQueryService(c continuous_querier.Config) { - if !c.Enabled { - return - } - srv := continuous_querier.NewService(c) - srv.MetaStore = s.MetaStore - srv.QueryExecutor = s.QueryExecutor - srv.PointsWriter = s.PointsWriter - s.Services = append(s.Services, srv) -} - -// Err returns an error channel that multiplexes all out of band errors received from all services. -func (s *Server) Err() <-chan error { return s.err } - -// Open opens the meta and data store and all services. -func (s *Server) Open() error { - if err := func() error { - // Start profiling, if set. - startProfile(s.CPUProfile, s.MemProfile) - - host, port, err := s.hostAddr() - if err != nil { - return err - } - - hostport := net.JoinHostPort(host, port) - addr, err := net.ResolveTCPAddr("tcp", hostport) - if err != nil { - return fmt.Errorf("resolve tcp: addr=%s, err=%s", hostport, err) - } - s.MetaStore.Addr = addr - s.MetaStore.RemoteAddr = &tcpaddr{hostport} - - // Open shared TCP connection. - ln, err := net.Listen("tcp", s.BindAddress) - if err != nil { - return fmt.Errorf("listen: %s", err) - } - s.Listener = ln - - // The port 0 is used, we need to retrieve the port assigned by the kernel - if strings.HasSuffix(s.BindAddress, ":0") { - s.MetaStore.Addr = ln.Addr() - } - - // Multiplex listener. - mux := tcp.NewMux() - s.MetaStore.RaftListener = mux.Listen(meta.MuxRaftHeader) - s.MetaStore.ExecListener = mux.Listen(meta.MuxExecHeader) - s.MetaStore.RPCListener = mux.Listen(meta.MuxRPCHeader) - - s.ClusterService.Listener = mux.Listen(cluster.MuxHeader) - s.SnapshotterService.Listener = mux.Listen(snapshotter.MuxHeader) - go mux.Serve(ln) - - // Open meta store. - if err := s.MetaStore.Open(); err != nil { - return fmt.Errorf("open meta store: %s", err) - } - go s.monitorErrorChan(s.MetaStore.Err()) - - // Wait for the store to initialize. - <-s.MetaStore.Ready() - - // Open TSDB store. 
- if err := s.TSDBStore.Open(); err != nil { - return fmt.Errorf("open tsdb store: %s", err) - } - - // Open the hinted handoff service - if err := s.HintedHandoff.Open(); err != nil { - return fmt.Errorf("open hinted handoff: %s", err) - } - - for _, service := range s.Services { - if err := service.Open(); err != nil { - return fmt.Errorf("open service: %s", err) - } - } - - // Start the reporting service, if not disabled. - if !s.reportingDisabled { - go s.startServerReporting() - } - - return nil - - }(); err != nil { - s.Close() - return err - } - - return nil -} - -// Close shuts down the meta and data stores and all services. -func (s *Server) Close() error { - stopProfile() - - if s.Listener != nil { - s.Listener.Close() - } - if s.MetaStore != nil { - s.MetaStore.Close() - } - if s.TSDBStore != nil { - s.TSDBStore.Close() - } - if s.HintedHandoff != nil { - s.HintedHandoff.Close() - } - for _, service := range s.Services { - service.Close() - } - - close(s.closing) - return nil -} - -// startServerReporting starts periodic server reporting. -func (s *Server) startServerReporting() { - for { - select { - case <-s.closing: - return - default: - } - if err := s.MetaStore.WaitForLeader(30 * time.Second); err != nil { - log.Printf("no leader available for reporting: %s", err.Error()) - time.Sleep(time.Second) - continue - } - s.reportServer() - <-time.After(24 * time.Hour) - } -} - -// reportServer reports anonymous statistics about the system. -func (s *Server) reportServer() { - dis, err := s.MetaStore.Databases() - if err != nil { - log.Printf("failed to retrieve databases for reporting: %s", err.Error()) - return - } - numDatabases := len(dis) - - numMeasurements := 0 - numSeries := 0 - for _, di := range dis { - d := s.TSDBStore.DatabaseIndex(di.Name) - if d == nil { - // No data in this store for this database. - continue - } - m, s := d.MeasurementSeriesCounts() - numMeasurements += m - numSeries += s - } - - clusterID, err := s.MetaStore.ClusterID() - if err != nil { - log.Printf("failed to retrieve cluster ID for reporting: %s", err.Error()) - return - } - - json := fmt.Sprintf(`[{ - "name":"reports", - "columns":["os", "arch", "version", "server_id", "cluster_id", "num_series", "num_measurements", "num_databases"], - "points":[["%s", "%s", "%s", "%x", "%x", "%d", "%d", "%d"]] - }]`, runtime.GOOS, runtime.GOARCH, s.version, s.MetaStore.NodeID(), clusterID, numSeries, numMeasurements, numDatabases) - - data := bytes.NewBufferString(json) - - log.Printf("Sending anonymous usage statistics to m.influxdb.com") - - client := http.Client{Timeout: time.Duration(5 * time.Second)} - go client.Post("http://m.influxdb.com:8086/db/reporting/series?u=reporter&p=influxdb", "application/json", data) -} - -// monitorErrorChan reads an error channel and resends it through the server. -func (s *Server) monitorErrorChan(ch <-chan error) { - for { - select { - case err, ok := <-ch: - if !ok { - return - } - s.err <- err - case <-s.closing: - return - } - } -} - -// hostAddr returns the host and port that remote nodes will use to reach this -// node. -func (s *Server) hostAddr() (string, string, error) { - // Resolve host to address. 
- _, port, err := net.SplitHostPort(s.BindAddress) - if err != nil { - return "", "", fmt.Errorf("split bind address: %s", err) - } - - host := s.Hostname - - // See if we might have a port that will override the BindAddress port - if host != "" && host[len(host)-1] >= '0' && host[len(host)-1] <= '9' && strings.Contains(host, ":") { - hostArg, portArg, err := net.SplitHostPort(s.Hostname) - if err != nil { - return "", "", err - } - - if hostArg != "" { - host = hostArg - } - - if portArg != "" { - port = portArg - } - } - return host, port, nil -} - -// Service represents a service attached to the server. -type Service interface { - Open() error - Close() error -} - -// prof stores the file locations of active profiles. -var prof struct { - cpu *os.File - mem *os.File -} - -// StartProfile initializes the cpu and memory profile, if specified. -func startProfile(cpuprofile, memprofile string) { - if cpuprofile != "" { - f, err := os.Create(cpuprofile) - if err != nil { - log.Fatalf("cpuprofile: %v", err) - } - log.Printf("writing CPU profile to: %s\n", cpuprofile) - prof.cpu = f - pprof.StartCPUProfile(prof.cpu) - } - - if memprofile != "" { - f, err := os.Create(memprofile) - if err != nil { - log.Fatalf("memprofile: %v", err) - } - log.Printf("writing mem profile to: %s\n", memprofile) - prof.mem = f - runtime.MemProfileRate = 4096 - } - -} - -// StopProfile closes the cpu and memory profiles if they are running. -func stopProfile() { - if prof.cpu != nil { - pprof.StopCPUProfile() - prof.cpu.Close() - log.Println("CPU profile stopped") - } - if prof.mem != nil { - pprof.Lookup("heap").WriteTo(prof.mem, 0) - prof.mem.Close() - log.Println("mem profile stopped") - } -} - -type tcpaddr struct{ host string } - -func (a *tcpaddr) Network() string { return "tcp" } -func (a *tcpaddr) String() string { return a.host } diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_helpers_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_helpers_test.go deleted file mode 100644 index adabdc19e..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_helpers_test.go +++ /dev/null @@ -1,312 +0,0 @@ -// This package is a set of convenience helpers and structs to make integration testing easier -package run_test - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "net/http" - "net/url" - "os" - "regexp" - "strings" - "testing" - "time" - - "github.com/influxdb/influxdb/cmd/influxd/run" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/services/httpd" - "github.com/influxdb/influxdb/toml" -) - -// Server represents a test wrapper for run.Server. -type Server struct { - *run.Server - Config *run.Config -} - -// NewServer returns a new instance of Server. -func NewServer(c *run.Config) *Server { - srv, _ := run.NewServer(c, "testServer") - s := Server{ - Server: srv, - Config: c, - } - s.TSDBStore.EngineOptions.Config = c.Data - configureLogging(&s) - return &s -} - -// OpenServer opens a test server. -func OpenServer(c *run.Config, joinURLs string) *Server { - s := NewServer(c) - configureLogging(s) - if err := s.Open(); err != nil { - panic(err.Error()) - } - - return s -} - -// OpenServerWithVersion opens a test server with a specific version. 
-func OpenServerWithVersion(c *run.Config, version string) *Server {
-	srv, _ := run.NewServer(c, version)
-	s := Server{
-		Server: srv,
-		Config: c,
-	}
-	configureLogging(&s)
-	if err := s.Open(); err != nil {
-		panic(err.Error())
-	}
-
-	return &s
-}
-
-// Close shuts down the server and removes all temporary paths.
-func (s *Server) Close() {
-	os.RemoveAll(s.Config.Meta.Dir)
-	os.RemoveAll(s.Config.Data.Dir)
-	os.RemoveAll(s.Config.HintedHandoff.Dir)
-	s.Server.Close()
-}
-
-// URL returns the base URL for the httpd endpoint.
-func (s *Server) URL() string {
-	for _, service := range s.Services {
-		if service, ok := service.(*httpd.Service); ok {
-			return "http://" + service.Addr().String()
-		}
-	}
-	panic("httpd server not found in services")
-}
-
-// CreateDatabaseAndRetentionPolicy will create the database and retention policy.
-func (s *Server) CreateDatabaseAndRetentionPolicy(db string, rp *meta.RetentionPolicyInfo) error {
-	if _, err := s.MetaStore.CreateDatabase(db); err != nil {
-		return err
-	} else if _, err := s.MetaStore.CreateRetentionPolicy(db, rp); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Query executes a query against the server and returns the results.
-func (s *Server) Query(query string) (results string, err error) {
-	return s.QueryWithParams(query, nil)
-}
-
-// QueryWithParams executes a query against the server using the given URL values and returns the results.
-func (s *Server) QueryWithParams(query string, values url.Values) (results string, err error) {
-	if values == nil {
-		values = url.Values{}
-	}
-	values.Set("q", query)
-	resp, err := http.Get(s.URL() + "/query?" + values.Encode())
-	if err != nil {
-		return "", err
-	}
-	body := string(MustReadAll(resp.Body))
-	switch resp.StatusCode {
-	case http.StatusBadRequest:
-		if !expectPattern(".*error parsing query.*", body) {
-			return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body)
-		}
-		return body, nil
-	case http.StatusOK:
-		return body, nil
-	default:
-		return "", fmt.Errorf("unexpected status code: code=%d, body=%s", resp.StatusCode, body)
-	}
-}
-
-// Write executes a write against the server and returns the results.
-func (s *Server) Write(db, rp, body string, params url.Values) (results string, err error) {
-	if params == nil {
-		params = url.Values{}
-	}
-	if params.Get("db") == "" {
-		params.Set("db", db)
-	}
-	if params.Get("rp") == "" {
-		params.Set("rp", rp)
-	}
-	resp, err := http.Post(s.URL()+"/write?"+params.Encode(), "", strings.NewReader(body))
-	if err != nil {
-		return "", err
-	} else if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
-		return "", fmt.Errorf("invalid status code: code=%d, body=%s", resp.StatusCode, MustReadAll(resp.Body))
-	}
-	return string(MustReadAll(resp.Body)), nil
-}
-
-// NewConfig returns the default config with temporary paths.
-func NewConfig() *run.Config {
-	c := run.NewConfig()
-	c.ReportingDisabled = true
-	c.Meta.Dir = MustTempFile()
-	c.Meta.BindAddress = "127.0.0.1:0"
-	c.Meta.HeartbeatTimeout = toml.Duration(50 * time.Millisecond)
-	c.Meta.ElectionTimeout = toml.Duration(50 * time.Millisecond)
-	c.Meta.LeaderLeaseTimeout = toml.Duration(50 * time.Millisecond)
-	c.Meta.CommitTimeout = toml.Duration(5 * time.Millisecond)
-
-	c.Data.Dir = MustTempFile()
-	c.Data.WALDir = MustTempFile()
-
-	c.HintedHandoff.Dir = MustTempFile()
-
-	c.HTTPD.Enabled = true
-	c.HTTPD.BindAddress = "127.0.0.1:0"
-	c.HTTPD.LogEnabled = testing.Verbose()
-
-	return c
-}
-
-func newRetentionPolicyInfo(name string, rf int, duration time.Duration) *meta.RetentionPolicyInfo {
-	return &meta.RetentionPolicyInfo{Name: name, ReplicaN: rf, Duration: duration}
-}
-
-func maxFloat64() string {
-	maxFloat64, _ := json.Marshal(math.MaxFloat64)
-	return string(maxFloat64)
-}
-
-func maxInt64() string {
-	maxInt64, _ := json.Marshal(int64(math.MaxInt64))
-	return string(maxInt64)
-}
-
-func now() time.Time {
-	return time.Now().UTC()
-}
-
-func yesterday() time.Time {
-	return now().Add(-1 * time.Hour * 24)
-}
-
-func mustParseTime(layout, value string) time.Time {
-	tm, err := time.Parse(layout, value)
-	if err != nil {
-		panic(err)
-	}
-	return tm
-}
-
-// MustReadAll reads r. Panic on error.
-func MustReadAll(r io.Reader) []byte {
-	b, err := ioutil.ReadAll(r)
-	if err != nil {
-		panic(err)
-	}
-	return b
-}
-
-// MustTempFile returns a path to a temporary file.
-func MustTempFile() string {
-	f, err := ioutil.TempFile("", "influxd-")
-	if err != nil {
-		panic(err)
-	}
-	f.Close()
-	os.Remove(f.Name())
-	return f.Name()
-}
-
-func expectPattern(exp, act string) bool {
-	return regexp.MustCompile(exp).MatchString(act)
-}
-
-type Query struct {
-	name     string
-	command  string
-	params   url.Values
-	exp, act string
-	pattern  bool
-	skip     bool
-}
-
-// Execute runs the command and returns an error if it fails.
-func (q *Query) Execute(s *Server) (err error) {
-	if q.params == nil {
-		q.act, err = s.Query(q.command)
-		return
-	}
-	q.act, err = s.QueryWithParams(q.command, q.params)
-	return
-}
-
-func (q *Query) success() bool {
-	if q.pattern {
-		return expectPattern(q.exp, q.act)
-	}
-	return q.exp == q.act
-}
-
-func (q *Query) Error(err error) string {
-	return fmt.Sprintf("%s: %v", q.name, err)
-}
-
-func (q *Query) failureMessage() string {
-	return fmt.Sprintf("%s: unexpected results\nquery: %s\nexp: %s\nactual: %s\n", q.name, q.command, q.exp, q.act)
-}
-
-type Test struct {
-	initialized bool
-	write       string
-	params      url.Values
-	db          string
-	rp          string
-	exp         string
-	queries     []*Query
-}
-
-func NewTest(db, rp string) Test {
-	return Test{
-		db: db,
-		rp: rp,
-	}
-}
-
-func (t *Test) addQueries(q ...*Query) {
-	t.queries = append(t.queries, q...)
-} - -func (t *Test) init(s *Server) error { - if t.write == "" || t.initialized { - return nil - } - t.initialized = true - if res, err := s.Write(t.db, t.rp, t.write, t.params); err != nil { - return err - } else if t.exp != res { - return fmt.Errorf("unexpected results\nexp: %s\ngot: %s\n", t.exp, res) - } - return nil -} - -func configureLogging(s *Server) { - // Set the logger to discard unless verbose is on - if !testing.Verbose() { - type logSetter interface { - SetLogger(*log.Logger) - } - nullLogger := log.New(ioutil.Discard, "", 0) - s.MetaStore.Logger = nullLogger - s.TSDBStore.Logger = nullLogger - s.HintedHandoff.SetLogger(nullLogger) - for _, service := range s.Services { - if service, ok := service.(logSetter); ok { - service.SetLogger(nullLogger) - } - } - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go deleted file mode 100644 index 10d67c7de..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.go +++ /dev/null @@ -1,3719 +0,0 @@ -package run_test - -import ( - "fmt" - "net/http" - "net/url" - "strconv" - "strings" - "testing" - "time" -) - -// Ensure that HTTP responses include the InfluxDB version. -func TestServer_HTTPResponseVersion(t *testing.T) { - version := "v1234" - s := OpenServerWithVersion(NewConfig(), version) - defer s.Close() - - resp, _ := http.Get(s.URL() + "/query") - got := resp.Header.Get("X-Influxdb-Version") - if got != version { - t.Errorf("Server responded with incorrect version, exp %s, got %s", version, got) - } -} - -// Ensure the database commands work. -func TestServer_DatabaseCommands(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - test := Test{ - queries: []*Query{ - &Query{ - name: "create database should succeed", - command: `CREATE DATABASE db0`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "create database should error with bad name", - command: `CREATE DATABASE 0xdb0`, - exp: `{"error":"error parsing query: found 0, expected identifier at line 1, char 17"}`, - }, - &Query{ - name: "show database should succeed", - command: `SHOW DATABASES`, - exp: `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db0"]]}]}]}`, - }, - &Query{ - name: "create database should error if it already exists", - command: `CREATE DATABASE db0`, - exp: `{"results":[{"error":"database already exists"}]}`, - }, - &Query{ - name: "drop database should succeed", - command: `DROP DATABASE db0`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "show database should have no results", - command: `SHOW DATABASES`, - exp: `{"results":[{"series":[{"name":"databases","columns":["name"]}]}]}`, - }, - &Query{ - name: "drop database should error if it doesn't exist", - command: `DROP DATABASE db0`, - exp: `{"results":[{"error":"database not found: db0"}]}`, - }, - }, - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_DropAndRecreateDatabase(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", 
"rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "Drop database after data write", - command: `DROP DATABASE db0`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "Recreate database", - command: `CREATE DATABASE db0`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "Recreate retention policy", - command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 365d REPLICATION 1 DEFAULT`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "Show measurements after recreate", - command: `SHOW MEASUREMENTS`, - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Query data after recreate", - command: `SELECT * FROM cpu`, - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_DropDatabaseIsolated(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicyInfo("rp1", 1, 0)); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "Query data from 1st database", - command: `SELECT * FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Query data from 1st database with GROUP BY *", - command: `SELECT * FROM cpu GROUP BY *`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Drop other database", - command: `DROP DATABASE db1`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "Query data from 1st database and ensure it's still there", - command: `SELECT * FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Query data from 1st database and ensure it's still there with GROUP BY *", - command: `SELECT * FROM cpu GROUP BY *`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_DropAndRecreateSeries(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "Show series is present", - command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Drop series after data write", - command: `DROP SERIES FROM cpu`, - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Show series is gone", - command: `SHOW SERIES`, - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } - - // Re-write data and test again. - reTest := NewTest("db0", "rp0") - reTest.write = strings.Join(writes, "\n") - - reTest.addQueries([]*Query{ - &Query{ - name: "Show series is present again after re-write", - command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - for i, query := range reTest.queries { - if i == 0 { - if err := reTest.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure retention policy commands work. -func TestServer_RetentionPolicyCommands(t *testing.T) { - t.Parallel() - c := NewConfig() - c.Meta.RetentionAutoCreate = false - s := OpenServer(c, "") - defer s.Close() - - // Create a database. 
- if _, err := s.MetaStore.CreateDatabase("db0"); err != nil { - t.Fatal(err) - } - - test := Test{ - queries: []*Query{ - &Query{ - name: "create retention policy should succeed", - command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 1h REPLICATION 1`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "create retention policy should error if it already exists", - command: `CREATE RETENTION POLICY rp0 ON db0 DURATION 1h REPLICATION 1`, - exp: `{"results":[{"error":"retention policy already exists"}]}`, - }, - &Query{ - name: "show retention policy should succeed", - command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","1h0m0s",1,false]]}]}]}`, - }, - &Query{ - name: "alter retention policy should succeed", - command: `ALTER RETENTION POLICY rp0 ON db0 DURATION 2h REPLICATION 3 DEFAULT`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "show retention policy should have new altered information", - command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["rp0","2h0m0s",3,true]]}]}]}`, - }, - &Query{ - name: "drop retention policy should succeed", - command: `DROP RETENTION POLICY rp0 ON db0`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "show retention policy should be empty after dropping them", - command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"]}]}]}`, - }, - &Query{ - name: "Ensure retention policy with unacceptable retention cannot be created", - command: `CREATE RETENTION POLICY rp3 ON db0 DURATION 1s REPLICATION 1`, - exp: `{"results":[{"error":"retention policy duration must be at least 1h0m0s"}]}`, - }, - &Query{ - name: "Check error when deleting retention policy on non-existent database", - command: `DROP RETENTION POLICY rp1 ON mydatabase`, - exp: `{"results":[{"error":"database not found"}]}`, - }, - }, - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the autocreation of retention policy works. -func TestServer_DatabaseRetentionPolicyAutoCreate(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - test := Test{ - queries: []*Query{ - &Query{ - name: "create database should succeed", - command: `CREATE DATABASE db0`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "show retention policies should return auto-created policy", - command: `SHOW RETENTION POLICIES ON db0`, - exp: `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["default","0",1,true]]}]}]}`, - }, - }, - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure user commands work. -func TestServer_UserCommands(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - // Create a database. 
-	if _, err := s.MetaStore.CreateDatabase("db0"); err != nil {
-		t.Fatal(err)
-	}
-
-	test := Test{
-		queries: []*Query{
-			&Query{
-				name:    "show users, no actual users",
-				command: `SHOW USERS`,
-				exp:     `{"results":[{"series":[{"columns":["user","admin"]}]}]}`,
-			},
-			&Query{
-				name:    `create user`,
-				command: "CREATE USER jdoe WITH PASSWORD '1337'",
-				exp:     `{"results":[{}]}`,
-			},
-			&Query{
-				name:    "show users, 1 existing user",
-				command: `SHOW USERS`,
-				exp:     `{"results":[{"series":[{"columns":["user","admin"],"values":[["jdoe",false]]}]}]}`,
-			},
-			&Query{
-				name:    "grant all privileges to jdoe",
-				command: `GRANT ALL PRIVILEGES TO jdoe`,
-				exp:     `{"results":[{}]}`,
-			},
-			&Query{
-				name:    "show users, existing user as admin",
-				command: `SHOW USERS`,
-				exp:     `{"results":[{"series":[{"columns":["user","admin"],"values":[["jdoe",true]]}]}]}`,
-			},
-			&Query{
-				name:    "grant DB privileges to user",
-				command: `GRANT READ ON db0 TO jdoe`,
-				exp:     `{"results":[{}]}`,
-			},
-			&Query{
-				name:    "revoke all privileges",
-				command: `REVOKE ALL PRIVILEGES FROM jdoe`,
-				exp:     `{"results":[{}]}`,
-			},
-			&Query{
-				name:    "bad create user request",
-				command: `CREATE USER 0xBAD WITH PASSWORD pwd1337`,
-				exp:     `{"error":"error parsing query: found 0, expected identifier at line 1, char 13"}`,
-			},
-			&Query{
-				name:    "bad create user request, no name",
-				command: `CREATE USER WITH PASSWORD pwd1337`,
-				exp:     `{"error":"error parsing query: found WITH, expected identifier at line 1, char 13"}`,
-			},
-			&Query{
-				name:    "bad create user request, no password",
-				command: `CREATE USER jdoe`,
-				exp:     `{"error":"error parsing query: found EOF, expected WITH at line 1, char 18"}`,
-			},
-			&Query{
-				name:    "drop user",
-				command: `DROP USER jdoe`,
-				exp:     `{"results":[{}]}`,
-			},
-			&Query{
-				name:    "make sure user was dropped",
-				command: `SHOW USERS`,
-				exp:     `{"results":[{"series":[{"columns":["user","admin"]}]}]}`,
-			},
-			&Query{
-				name:    "delete non-existent user",
-				command: `DROP USER noone`,
-				exp:     `{"results":[{"error":"user not found"}]}`,
-			},
-		},
-	}
-
-	for _, query := range test.queries {
-		if query.skip {
-			t.Logf("SKIP:: %s", query.name)
-			continue
-		}
-		if err := query.Execute(s); err != nil {
-			t.Errorf("command: %s - err: %s", query.command, query.Error(err))
-		} else if !query.success() {
-			t.Error(query.failureMessage())
-		}
-	}
-}
-
-// Ensure the server can create a single point via json protocol and read it back.
-func TestServer_Write_JSON(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
-		t.Fatal(err)
-	}
-
-	now := now()
-	if res, err := s.Write("", "", fmt.Sprintf(`{"database" : "db0", "retentionPolicy" : "rp0", "points": [{"measurement": "cpu", "tags": {"host": "server02"},"fields": {"value": 1.0}}],"time":"%s"} `, now.Format(time.RFC3339Nano)), nil); err != nil {
-		t.Fatal(err)
-	} else if exp := ``; exp != res {
-		t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
-	}
-
-	// Verify the data was written.
- if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { - t.Fatal(err) - } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } -} - -// Ensure the server can create a single point via line protocol with float type and read it back. -func TestServer_Write_LineProtocol_Float(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=1.0 `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { - t.Fatal(err) - } else if exp := ``; exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } - - // Verify the data was written. - if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { - t.Fatal(err) - } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } -} - -// Ensure the server can create a single point via line protocol with bool type and read it back. -func TestServer_Write_LineProtocol_Bool(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=true `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { - t.Fatal(err) - } else if exp := ``; exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } - - // Verify the data was written. - if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { - t.Fatal(err) - } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",true]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } -} - -// Ensure the server can create a single point via line protocol with string type and read it back. -func TestServer_Write_LineProtocol_String(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - if res, err := s.Write("db0", "rp0", `cpu,host=server01 value="disk full" `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil { - t.Fatal(err) - } else if exp := ``; exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } - - // Verify the data was written. - if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil { - t.Fatal(err) - } else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s","disk full"]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res { - t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res) - } -} - -// Ensure the server can create a single point via line protocol with integer type and read it back. 
-func TestServer_Write_LineProtocol_Integer(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
-		t.Fatal(err)
-	}
-
-	now := now()
-	if res, err := s.Write("db0", "rp0", `cpu,host=server01 value=100 `+strconv.FormatInt(now.UnixNano(), 10), nil); err != nil {
-		t.Fatal(err)
-	} else if exp := ``; exp != res {
-		t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
-	}
-
-	// Verify the data was written.
-	if res, err := s.Query(`SELECT * FROM db0.rp0.cpu GROUP BY *`); err != nil {
-		t.Fatal(err)
-	} else if exp := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]}]}]}`, now.Format(time.RFC3339Nano)); exp != res {
-		t.Fatalf("unexpected results\nexp: %s\ngot: %s\n", exp, res)
-	}
-}
-
-// Ensure the server can query with a default database (via the db param) and default retention policy.
-func TestServer_Query_DefaultDBAndRP(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil {
-		t.Fatal(err)
-	}
-
-	test := NewTest("db0", "rp0")
-	test.write = fmt.Sprintf(`cpu value=1.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano())
-
-	test.addQueries([]*Query{
-		&Query{
-			name:    "default db and rp",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT * FROM cpu GROUP BY *`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`,
-		},
-		&Query{
-			name:    "default rp exists",
-			command: `show retention policies ON db0`,
-			exp:     `{"results":[{"series":[{"columns":["name","duration","replicaN","default"],"values":[["default","0",1,false],["rp0","1h0m0s",1,true]]}]}]}`,
-		},
-		&Query{
-			name:    "default rp",
-			command: `SELECT * FROM db0..cpu GROUP BY *`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`,
-		},
-		&Query{
-			name:    "default db",
-			params:  url.Values{"db": []string{"db0"}},
-			command: `SELECT * FROM rp0.cpu GROUP BY *`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T01:00:00Z",1]]}]}]}`,
-		},
-	}...)
-
-	if err := test.init(s); err != nil {
-		t.Fatalf("test init failed: %s", err)
-	}
-
-	for _, query := range test.queries {
-		if query.skip {
-			t.Logf("SKIP:: %s", query.name)
-			continue
-		}
-		if err := query.Execute(s); err != nil {
-			t.Error(query.Error(err))
-		} else if !query.success() {
-			t.Error(query.failureMessage())
-		}
-	}
-}
-
-// Ensure the server can have a database with multiple measurements.
-func TestServer_Query_Multiple_Measurements(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - // Make sure we do writes for measurements that will span across shards - writes := []string{ - fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "measurement in one shard but not another shouldn't panic server", - command: `SELECT host,value FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["2000-01-01T00:00:00Z","server01",100]]}]}]}`, - }, - &Query{ - name: "measurement in one shard but not another shouldn't panic server", - command: `SELECT host,value FROM db0.rp0.cpu GROUP BY host`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`, - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server correctly supports data with identical tag values. -func TestServer_Query_IdenticalTagValues(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf("cpu,t1=val1 value=1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf("cpu,t2=val2 value=2 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf("cpu,t1=val2 value=3 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:02:00Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "measurements with identical tag values - SELECT *, no GROUP BY", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]},{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]}]}]}`, - }, - &Query{ - name: "measurements with identical tag values - SELECT *, with GROUP BY", - command: `SELECT value FROM db0.rp0.cpu GROUP BY t1,t2`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"t1":"val1","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"t1":"val2","t2":""},"columns":["time","value"],"values":[["2000-01-01T00:02:00Z",3]]},{"name":"cpu","tags":{"t1":"","t2":"val2"},"columns":["time","value"],"values":[["2000-01-01T00:01:00Z",2]]}]}]}`, - }, - &Query{ - name: "measurements with identical tag values - SELECT value no GROUP BY", - command: 
`SELECT value FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["2000-01-01T00:00:00Z",1],["2000-01-01T00:01:00Z",2],["2000-01-01T00:02:00Z",3]]}]}]}`, - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can handle a query that involves accessing no shards. -func TestServer_Query_NoShards(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = `cpu,host=server01 value=1 ` + strconv.FormatInt(now.UnixNano(), 10) - - test.addQueries([]*Query{ - &Query{ - name: "selecting value should succeed", - command: `SELECT value FROM db0.rp0.cpu WHERE time < now() - 1d`, - exp: `{"results":[{}]}`, - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can query a non-existent field -func TestServer_Query_NonExistent(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = `cpu,host=server01 value=1 ` + strconv.FormatInt(now.UnixNano(), 10) - - test.addQueries([]*Query{ - &Query{ - name: "selecting value should succeed", - command: `SELECT value FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - &Query{ - name: "selecting non-existent should succeed", - command: `SELECT foo FROM db0.rp0.cpu`, - exp: `{"results":[{}]}`, - }, - }...) 
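Several of the queries above assert the literal body `{"results":[{}]}`: a query that touches no shards, or selects a non-existent field, still returns exactly one result object with no series. A minimal sketch of checking that shape client-side (the `response` struct is a local illustration, not the server's own type):

package main

import (
	"encoding/json"
	"fmt"
)

// response mirrors only the fields these assertions care about.
type response struct {
	Results []struct {
		Error  string            `json:"error,omitempty"`
		Series []json.RawMessage `json:"series,omitempty"`
	} `json:"results"`
}

func main() {
	var r response
	if err := json.Unmarshal([]byte(`{"results":[{}]}`), &r); err != nil {
		panic(err)
	}
	// One result object, no error, no series: the "empty set" shape.
	fmt.Println(len(r.Results) == 1 && r.Results[0].Error == "" && len(r.Results[0].Series) == 0) // true
}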
- - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can perform basic math -func TestServer_Query_Math(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db", newRetentionPolicyInfo("rp", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - writes := []string{ - "float value=42 " + strconv.FormatInt(now.UnixNano(), 10), - "integer value=42i " + strconv.FormatInt(now.UnixNano(), 10), - } - - test := NewTest("db", "rp") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "SELECT multiple of float value", - command: `SELECT value * 2 from db.rp.float`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - &Query{ - name: "SELECT multiple of float value", - command: `SELECT 2 * value from db.rp.float`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"float","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - &Query{ - name: "SELECT multiple of integer value", - command: `SELECT value * 2 from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - &Query{ - name: "SELECT float multiple of integer value", - command: `SELECT value * 2.0 from db.rp.integer`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"integer","columns":["time",""],"values":[["%s",84]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can query with the count aggregate function -func TestServer_Query_Count(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10) - - hour_ago := now.Add(-time.Hour).UTC() - - test.addQueries([]*Query{ - &Query{ - name: "selecting count(value) should succeed", - command: `SELECT count(value) FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - &Query{ - name: "selecting count(value) with where time should return result", - command: fmt.Sprintf(`SELECT count(value) FROM db0.rp0.cpu WHERE time >= '%s'`, hour_ago.Format(time.RFC3339Nano)), - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["%s",1]]}]}]}`, hour_ago.Format(time.RFC3339Nano)), - }, - &Query{ - name: "selecting count(*) should error", - command: `SELECT count(*) FROM db0.rp0.cpu`, - exp: `{"results":[{"error":"expected field argument in count()"}]}`, - }, - }...) 
- - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can query with Now(). -func TestServer_Query_Now(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10) - - test.addQueries([]*Query{ - &Query{ - name: "where with time < now() should work", - command: `SELECT * FROM db0.rp0.cpu where time < now()`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["%s","server01",1]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - &Query{ - name: "where with time < now() and GROUP BY * should work", - command: `SELECT * FROM db0.rp0.cpu where time < now() GROUP BY *`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - &Query{ - name: "where with time > now() should return an empty result", - command: `SELECT * FROM db0.rp0.cpu where time > now()`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "where with time > now() with GROUP BY * should return an empty result", - command: `SELECT * FROM db0.rp0.cpu where time > now() GROUP BY *`, - exp: `{"results":[{}]}`, - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can query with epoch precisions. 
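The epoch conversions asserted in the test below are plain integer division of the nanosecond timestamp, exactly as the expected values are computed; a minimal standalone sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	ns := time.Now().UnixNano()
	fmt.Println("n: ", ns)                         // nanoseconds, as written
	fmt.Println("u: ", ns/int64(time.Microsecond)) // epoch=u
	fmt.Println("ms:", ns/int64(time.Millisecond)) // epoch=ms
	fmt.Println("s: ", ns/int64(time.Second))      // epoch=s
	fmt.Println("m: ", ns/int64(time.Minute))      // epoch=m
	fmt.Println("h: ", ns/int64(time.Hour))        // epoch=h
}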
-func TestServer_Query_EpochPrecision(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = `cpu,host=server01 value=1.0 ` + strconv.FormatInt(now.UnixNano(), 10) - - test.addQueries([]*Query{ - &Query{ - name: "nanosecond precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"n"}}, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()), - }, - &Query{ - name: "microsecond precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"u"}}, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Microsecond)), - }, - &Query{ - name: "millisecond precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"ms"}}, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Millisecond)), - }, - &Query{ - name: "second precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"s"}}, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Second)), - }, - &Query{ - name: "minute precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"m"}}, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Minute)), - }, - &Query{ - name: "hour precision", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - params: url.Values{"epoch": []string{"h"}}, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[[%d,1]]}]}]}`, now.UnixNano()/int64(time.Hour)), - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server works with tag queries. 
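A useful mental model for the tag tests that follow: measurement plus sorted tag pairs form a series key, which is why `GROUP BY host` splits the points into one series per host value. `buildSeriesKey` below is a hypothetical helper for illustration; the real storage layer is more involved:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// buildSeriesKey joins a measurement with its sorted tag pairs.
func buildSeriesKey(measurement string, tags map[string]string) string {
	keys := make([]string, 0, len(tags))
	for k := range tags {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	parts := []string{measurement}
	for _, k := range keys {
		parts = append(parts, k+"="+tags[k])
	}
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(buildSeriesKey("cpu", map[string]string{"host": "server01"})) // cpu,host=server01
	fmt.Println(buildSeriesKey("cpu", map[string]string{"host": "server02"})) // cpu,host=server02
}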
-func TestServer_Query_Tags(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - writes := []string{ - fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", now.UnixNano()), - fmt.Sprintf("cpu,host=server02 value=50,core=2 %d", now.Add(1).UnixNano()), - - fmt.Sprintf("cpu1,host=server01,region=us-west value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf("cpu1,host=server02 value=200 %d", mustParseTime(time.RFC3339Nano, "2010-02-28T01:03:37.703820946Z").UnixNano()), - fmt.Sprintf("cpu1,host=server03 value=300 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), - - fmt.Sprintf("cpu2,host=server01 value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf("cpu2 value=200 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), - - fmt.Sprintf("cpu3,company=acme01 value=100 %d", mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf("cpu3 value=200 %d", mustParseTime(time.RFC3339Nano, "2012-02-28T01:03:38.703820946Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "tag without field should return error", - command: `SELECT host FROM db0.rp0.cpu`, - exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, - }, - &Query{ - name: "field with tag should succeed", - command: `SELECT host, value FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value"],"values":[["%s","server01",100],["%s","server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "field with tag and GROUP BY should succeed", - command: `SELECT host, value FROM db0.rp0.cpu GROUP BY host`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "field with two tags should succeed", - command: `SELECT host, value, core FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","host","value","core"],"values":[["%s","server01",100,4],["%s","server02",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "field with two tags and GROUP BY should succeed", - command: `SELECT host, value, core FROM db0.rp0.cpu GROUP BY host`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value","core"],"values":[["%s",100,4]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value","core"],"values":[["%s",50,2]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "select * with tags should succeed", - command: `SELECT * FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","core","host","value"],"values":[["%s",4,"server01",100],["%s",2,"server02",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: 
"select * with tags with GROUP BY * should succeed", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","core","value"],"values":[["%s",4,100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","core","value"],"values":[["%s",2,50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "group by tag", - command: `SELECT value FROM db0.rp0.cpu GROUP by host`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","value"],"values":[["%s",50]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "single field (EQ tag value1)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (2 EQ tags)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' AND region = 'us-west'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (OR different tags)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server03' OR region = 'us-west'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (OR with non-existent tag value)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' OR host = 'server66'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (OR with all tag values)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' OR host = 'server02' OR host = 'server03'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (1 EQ and 1 NEQ tag)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server01' AND region != 'us-west'`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "single field (EQ tag value2)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host = 'server02'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200]]}]}]}`, - }, - &Query{ - name: "single field (NEQ tag value1)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300]]}]}]}`, - }, - &Query{ - name: "single field (NEQ tag value1 AND NEQ tag value2)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' AND host != 'server02'`, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",300]]}]}]}`, - }, - &Query{ - name: "single field (NEQ tag value1 OR NEQ tag value2)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' OR host != 'server02'`, // Yes, this is always 
true, but that's the point. - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","value"],"values":[["2010-02-28T01:03:37.703820946Z",200],["2012-02-28T01:03:38.703820946Z",300],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (NEQ tag value1 AND NEQ tag value2 AND NEQ tag value3)", - command: `SELECT value FROM db0.rp0.cpu1 WHERE host != 'server01' AND host != 'server02' AND host != 'server03'`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "single field (NEQ tag value1, point without any tags)", - command: `SELECT value FROM db0.rp0.cpu2 WHERE host != 'server01'`, - exp: `{"results":[{"series":[{"name":"cpu2","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200]]}]}]}`, - }, - &Query{ - name: "single field (NEQ tag value1, point without any tags)", - command: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme01/`, - exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200]]}]}]}`, - }, - &Query{ - name: "single field (regex tag match)", - command: `SELECT value FROM db0.rp0.cpu3 WHERE company =~ /acme01/`, - exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - &Query{ - name: "single field (regex tag match)", - command: `SELECT value FROM db0.rp0.cpu3 WHERE company !~ /acme[23]/`, - exp: `{"results":[{"series":[{"name":"cpu3","columns":["time","value"],"values":[["2012-02-28T01:03:38.703820946Z",200],["2015-02-28T01:03:36.703820946Z",100]]}]}]}`, - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server correctly queries with an alias. 
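The aggregate expectations in the alias test below are ordinary arithmetic over the two written points (value=1,2 and steps=3,4); a quick standalone check of those numbers:

package main

import "fmt"

func main() {
	values := []float64{1, 2} // value=1i, value=2i
	steps := []float64{3, 4}  // steps=3i, steps=4i
	sum := func(xs []float64) float64 {
		s := 0.0
		for _, x := range xs {
			s += x
		}
		return s
	}
	fmt.Println(sum(values))                        // 3, matching sum(value)
	fmt.Println(sum(steps))                         // 7, matching sum(steps)
	fmt.Println(sum(values) / float64(len(values))) // 1.5, matching mean(value)
}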
-func TestServer_Query_Alias(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf("cpu value=1i,steps=3i %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf("cpu value=2i,steps=4i %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "baseline query - SELECT * FROM db0.rp0.cpu", - command: `SELECT * FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","steps","value"],"values":[["2000-01-01T00:00:00Z",3,1],["2000-01-01T00:01:00Z",4,2]]}]}]}`, - }, - &Query{ - name: "basic query with alias - SELECT steps, value as v FROM db0.rp0.cpu", - command: `SELECT steps, value as v FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","steps","v"],"values":[["2000-01-01T00:00:00Z",3,1],["2000-01-01T00:01:00Z",4,2]]}]}]}`, - }, - &Query{ - name: "double aggregate sum - SELECT sum(value), sum(steps) FROM db0.rp0.cpu", - command: `SELECT sum(value), sum(steps) FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","sum"],"values":[["1970-01-01T00:00:00Z",3,7]]}]}]}`, - }, - &Query{ - name: "double aggregate sum reverse order - SELECT sum(steps), sum(value) FROM db0.rp0.cpu", - command: `SELECT sum(steps), sum(value) FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","sum"],"values":[["1970-01-01T00:00:00Z",7,3]]}]}]}`, - }, - &Query{ - name: "double aggregate sum with alias - SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu", - command: `SELECT sum(value) as sumv, sum(steps) as sums FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sumv","sums"],"values":[["1970-01-01T00:00:00Z",3,7]]}]}]}`, - }, - &Query{ - name: "double aggregate with same value - SELECT sum(value), mean(value) FROM db0.rp0.cpu", - command: `SELECT sum(value), mean(value) FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",3,1.5]]}]}]}`, - }, - &Query{ - name: "double aggregate with same value and same alias - SELECT mean(value) as mv, max(value) as mv FROM db0.rp0.cpu", - command: `SELECT mean(value) as mv, max(value) as mv FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","mv","mv"],"values":[["1970-01-01T00:00:00Z",1.5,2]]}]}]}`, - }, - &Query{ - name: "double aggregate with non-existent field - SELECT mean(value), max(foo) FROM db0.rp0.cpu", - command: `SELECT mean(value), max(foo) FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","mean","max"],"values":[["1970-01-01T00:00:00Z",1.5,null]]}]}]}`, - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server will succeed and error for common scenarios. 
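The "database name required" cases in the next test concern queries that neither fully qualify the measurement (`db0.rp0.cpu`) nor pass a `db` parameter. A minimal sketch of supplying that parameter on the query endpoint (the host and `/query` path here are assumptions for illustration, following the tests' own use of url.Values):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	params := url.Values{}
	params.Set("db", "db0") // lets a bare `FROM cpu` resolve against db0
	params.Set("q", `SELECT value FROM cpu`)
	u := url.URL{Scheme: "http", Host: "localhost:8086", Path: "/query", RawQuery: params.Encode()}
	fmt.Println(u.String())
}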
-func TestServer_Query_Common(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = fmt.Sprintf("cpu,host=server01 value=1 %s", strconv.FormatInt(now.UnixNano(), 10)) - - test.addQueries([]*Query{ - &Query{ - name: "selecting a from a non-existent database should error", - command: `SELECT value FROM db1.rp0.cpu`, - exp: `{"results":[{"error":"database not found: db1"}]}`, - }, - &Query{ - name: "selecting a from a non-existent retention policy should error", - command: `SELECT value FROM db0.rp1.cpu`, - exp: `{"results":[{"error":"retention policy not found"}]}`, - }, - &Query{ - name: "selecting a valid measurement and field should succeed", - command: `SELECT value FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - &Query{ - name: "explicitly selecting time and a valid measurement and field should succeed", - command: `SELECT time,value FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",1]]}]}]}`, now.Format(time.RFC3339Nano)), - }, - &Query{ - name: "selecting a measurement that doesn't exist should result in empty set", - command: `SELECT value FROM db0.rp0.idontexist`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "selecting a field that doesn't exist should result in empty set", - command: `SELECT idontexist FROM db0.rp0.cpu`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "selecting wildcard without specifying a database should error", - command: `SELECT * FROM cpu`, - exp: `{"results":[{"error":"database name required"}]}`, - }, - &Query{ - name: "selecting explicit field without specifying a database should error", - command: `SELECT value FROM cpu`, - exp: `{"results":[{"error":"database name required"}]}`, - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can query two points. 
-func TestServer_Query_SelectTwoPoints(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = fmt.Sprintf("cpu value=100 %s\ncpu value=200 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10)) - - test.addQueries( - &Query{ - name: "selecting two points should result in two points", - command: `SELECT * FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - &Query{ - name: "selecting two points with GROUP BY * should result in two points", - command: `SELECT * FROM db0.rp0.cpu GROUP BY *`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }, - ) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can query two negative points. -func TestServer_Query_SelectTwoNegativePoints(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - now := now() - - test := NewTest("db0", "rp0") - test.write = fmt.Sprintf("cpu value=-100 %s\ncpu value=-200 %s", strconv.FormatInt(now.UnixNano(), 10), strconv.FormatInt(now.Add(1).UnixNano(), 10)) - - test.addQueries(&Query{ - name: "selecting two negative points should succeed", - command: `SELECT * FROM db0.rp0.cpu`, - exp: fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[["%s",-100],["%s",-200]]}]}]}`, now.Format(time.RFC3339Nano), now.Add(1).Format(time.RFC3339Nano)), - }) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Ensure the server can query with relative time. 
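Relative-time filters in the next test reduce to a cutoff instant; a minimal sketch of the `now() - 1m` boundary it asserts:

package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now().UTC()
	yesterday := now.Add(-24 * time.Hour)
	cutoff := now.Add(-time.Minute)       // what `time >= now() - 1m` evaluates to
	fmt.Println(yesterday.Before(cutoff)) // true: yesterday's point is filtered out
	fmt.Println(now.Before(cutoff))       // false: only the recent point remains
}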
-func TestServer_Query_SelectRelativeTime(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
-		t.Fatal(err)
-	}
-
-	now := now()
-	yesterday := yesterday()
-
-	test := NewTest("db0", "rp0")
-	test.write = fmt.Sprintf("cpu,host=server01 value=100 %s\ncpu,host=server01 value=200 %s", strconv.FormatInt(yesterday.UnixNano(), 10), strconv.FormatInt(now.UnixNano(), 10))
-
-	test.addQueries([]*Query{
-		&Query{
-			name:    "single point with time pre-calculated for past time queries yesterday",
-			command: `SELECT * FROM db0.rp0.cpu where time >= '` + yesterday.Add(-1*time.Minute).Format(time.RFC3339Nano) + `' GROUP BY *`,
-			exp:     fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",100],["%s",200]]}]}]}`, yesterday.Format(time.RFC3339Nano), now.Format(time.RFC3339Nano)),
-		},
-		&Query{
-			name:    "single point with time pre-calculated for relative time queries now",
-			command: `SELECT * FROM db0.rp0.cpu where time >= now() - 1m GROUP BY *`,
-			exp:     fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["%s",200]]}]}]}`, now.Format(time.RFC3339Nano)),
-		},
-	}...)
-
-	for i, query := range test.queries {
-		if i == 0 {
-			if err := test.init(s); err != nil {
-				t.Fatalf("test init failed: %s", err)
-			}
-		}
-		if query.skip {
-			t.Logf("SKIP:: %s", query.name)
-			continue
-		}
-		if err := query.Execute(s); err != nil {
-			t.Error(query.Error(err))
-		} else if !query.success() {
-			t.Error(query.failureMessage())
-		}
-	}
-}
-
-// Ensure the server can handle various simple calculus queries.
-func TestServer_Query_SelectRawCalculus(t *testing.T) {
-	t.Parallel()
-	s := OpenServer(NewConfig(), "")
-	defer s.Close()
-
-	if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil {
-		t.Fatal(err)
-	}
-
-	test := NewTest("db0", "rp0")
-	test.write = "cpu value=210 1278010021000000000\ncpu value=10 1278010022000000000"
-
-	test.addQueries([]*Query{
-		&Query{
-			name:    "calculate single derivative",
-			command: `SELECT derivative(value) from db0.rp0.cpu`,
-			exp:     `{"results":[{"series":[{"name":"cpu","columns":["time","derivative"],"values":[["2010-07-01T18:47:02Z",-200]]}]}]}`,
-		},
-	}...)
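The derivative expectation above is just the per-second rate of change between the two written points; a standalone check of that arithmetic:

package main

import (
	"fmt"
	"time"
)

func main() {
	t1 := time.Unix(0, 1278010021000000000)
	t2 := time.Unix(0, 1278010022000000000)
	v1, v2 := 210.0, 10.0
	deriv := (v2 - v1) / t2.Sub(t1).Seconds()         // per-second rate of change
	fmt.Println(t2.UTC().Format(time.RFC3339), deriv) // 2010-07-01T18:47:02Z -200
}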
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// mergeMany ensures that when merging many series together and some of them have a different number -// of points than others in a group by interval the results are correct -func TestServer_Query_MergeMany(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - - test := NewTest("db0", "rp0") - - writes := []string{} - for i := 1; i < 11; i++ { - for j := 1; j < 5+i%3; j++ { - data := fmt.Sprintf(`cpu,host=server_%d value=22 %d`, i, time.Unix(int64(j), int64(0)).UTC().UnixNano()) - writes = append(writes, data) - } - } - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "GROUP by time", - command: `SELECT count(value) FROM db0.rp0.cpu WHERE time >= '1970-01-01T00:00:01Z' AND time <= '1970-01-01T00:00:06Z' GROUP BY time(1s)`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:01Z",10],["1970-01-01T00:00:02Z",10],["1970-01-01T00:00:03Z",10],["1970-01-01T00:00:04Z",10],["1970-01-01T00:00:05Z",7],["1970-01-01T00:00:06Z",3]]}]}]}`, - }, - &Query{ - skip: true, - name: "GROUP by tag - FIXME issue #2875", - command: `SELECT count(value) FROM db0.rp0.cpu where time >= '2000-01-01T00:00:00Z' and time <= '2000-01-01T02:00:00Z' group by host`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server02"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server03"},"columns":["time","count"],"values":[["2000-01-01T00:00:00Z",1]]}]}]}`, - }, - &Query{ - name: "GROUP by field", - command: `SELECT count(value) FROM db0.rp0.cpu group by value`, - exp: `{"results":[{"error":"can not use field in GROUP BY clause: value"}]}`, - }, - }...) 
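The GROUP BY time(1s) counts asserted above fall straight out of the write loops; a standalone reproduction of the expected 10, 10, 10, 10, 7, 3 sequence:

package main

import "fmt"

func main() {
	counts := map[int]int{}
	for i := 1; i < 11; i++ { // ten hosts, as in the write loop above
		for j := 1; j < 5+i%3; j++ { // varying point counts per host
			counts[j]++ // one point per (host, second) pair
		}
	}
	for sec := 1; sec <= 6; sec++ {
		fmt.Println(sec, counts[sec]) // 10, 10, 10, 10, 7, 3
	}
}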
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_SLimitAndSOffset(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - - test := NewTest("db0", "rp0") - - writes := []string{} - for i := 1; i < 10; i++ { - data := fmt.Sprintf(`cpu,region=us-east,host=server-%d value=%d %d`, i, i, time.Unix(int64(i), int64(0)).UnixNano()) - writes = append(writes, data) - } - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "SLIMIT 2 SOFFSET 1", - command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 1`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-2","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-3","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - &Query{ - name: "SLIMIT 2 SOFFSET 3", - command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 2 SOFFSET 3`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-4","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]},{"name":"cpu","tags":{"host":"server-5","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - &Query{ - name: "SLIMIT 3 SOFFSET 8", - command: `SELECT count(value) FROM db0.rp0.cpu GROUP BY * SLIMIT 3 SOFFSET 8`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server-9","region":"us-east"},"columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - }...) 
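SLIMIT and SOFFSET page through the matched series rather than points; a minimal slice-based sketch of the three cases above (the clamping mirrors the SLIMIT 3 SOFFSET 8 case, which returns only server-9):

package main

import "fmt"

func seriesPage(series []string, slimit, soffset int) []string {
	if soffset >= len(series) {
		return nil
	}
	end := soffset + slimit
	if end > len(series) {
		end = len(series)
	}
	return series[soffset:end]
}

func main() {
	series := make([]string, 0, 9)
	for i := 1; i < 10; i++ {
		series = append(series, fmt.Sprintf("cpu,host=server-%d,region=us-east", i))
	}
	fmt.Println(seriesPage(series, 2, 1)) // server-2, server-3
	fmt.Println(seriesPage(series, 2, 3)) // server-4, server-5
	fmt.Println(seriesPage(series, 3, 8)) // server-9 only
}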
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Regex(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu1,host=server01 value=10 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf(`cpu2,host=server01 value=20 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf(`cpu3,host=server01 value=30 %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "default db and rp", - command: `SELECT * FROM /cpu[13]/`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu1","columns":["time","host","value"],"values":[["2015-02-28T01:03:36.703820946Z","server01",10]]},{"name":"cpu3","columns":["time","host","value"],"values":[["2015-02-28T01:03:36.703820946Z","server01",30]]}]}]}`, - }, - &Query{ - name: "default db and rp with GROUP BY *", - command: `SELECT * FROM /cpu[13]/ GROUP BY *`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, - }, - &Query{ - name: "specifying db and rp", - command: `SELECT * FROM db0.rp0./cpu[13]/ GROUP BY *`, - exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, - }, - &Query{ - name: "default db and specified rp", - command: `SELECT * FROM rp0./cpu[13]/ GROUP BY *`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, - }, - &Query{ - name: "specified db and default rp", - command: `SELECT * FROM db0../cpu[13]/ GROUP BY *`, - exp: `{"results":[{"series":[{"name":"cpu1","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",10]]},{"name":"cpu3","tags":{"host":"server01"},"columns":["time","value"],"values":[["2015-02-28T01:03:36.703820946Z",30]]}]}]}`, - }, - }...) 
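The regex measurement selector above behaves like ordinary regular-expression matching over measurement names; a minimal sketch with Go's regexp package:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`cpu[13]`) // the /cpu[13]/ selector used above
	for _, m := range []string{"cpu1", "cpu2", "cpu3"} {
		if re.MatchString(m) {
			fmt.Println(m) // cpu1 and cpu3 match; cpu2 does not
		}
	}
}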
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Aggregates(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`int value=45 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - - fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intmax value=%s %d`, maxInt64(), mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), - - fmt.Sprintf(`intmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`intmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`intmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`intmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - fmt.Sprintf(`intmany,host=server06 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), - fmt.Sprintf(`intmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`intmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), - - fmt.Sprintf(`intoverlap,region=us-east value=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intoverlap,region=us-east value=30 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`intoverlap,region=us-west value=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`intoverlap,region=us-east otherVal=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - - fmt.Sprintf(`floatsingle value=45.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - - fmt.Sprintf(`floatmax value=%s %d`, maxFloat64(), mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`floatmax value=%s %d`, maxFloat64(), mustParseTime(time.RFC3339Nano, "2000-01-01T01:00:00Z").UnixNano()), - - fmt.Sprintf(`floatmany,host=server01 value=2.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server02 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server03 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server04 value=4.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server05 value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:40Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server06 
value=5.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:50Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server07 value=7.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:00Z").UnixNano()), - fmt.Sprintf(`floatmany,host=server08 value=9.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:01:10Z").UnixNano()), - - fmt.Sprintf(`floatoverlap,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`floatoverlap,region=us-east value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`floatoverlap,region=us-west value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`floatoverlap,region=us-east otherVal=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - - fmt.Sprintf(`load,region=us-east,host=serverA value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`load,region=us-east,host=serverB value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`load,region=us-west,host=serverC value=100.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - - fmt.Sprintf(`cpu,region=uk,host=serverZ,service=redis value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - fmt.Sprintf(`cpu,region=uk,host=serverZ,service=mysql value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - - fmt.Sprintf(`stringdata value="first" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:03Z").UnixNano()), - fmt.Sprintf(`stringdata value="last" %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:04Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - // int64 - &Query{ - name: "stddev with just one point - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT STDDEV(value) FROM int`, - exp: `{"results":[{"series":[{"name":"int","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, - }, - &Query{ - name: "large mean and stddev - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEAN(value), STDDEV(value) FROM intmax`, - exp: `{"results":[{"series":[{"name":"intmax","columns":["time","mean","stddev"],"values":[["1970-01-01T00:00:00Z",` + maxInt64() + `,0]]}]}]}`, - }, - &Query{ - name: "mean and stddev - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEAN(value), STDDEV(value) FROM intmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","mean","stddev"],"values":[["2000-01-01T00:00:00Z",5,2.138089935299395]]}]}]}`, - }, - &Query{ - name: "first - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT FIRST(value) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","first"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, - }, - &Query{ - name: "last - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT LAST(value) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","last"],"values":[["1970-01-01T00:00:00Z",9]]}]}]}`, - }, - &Query{ - name: "spread - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SPREAD(value) FROM intmany`, - exp: 
`{"results":[{"series":[{"name":"intmany","columns":["time","spread"],"values":[["1970-01-01T00:00:00Z",7]]}]}]}`, - }, - &Query{ - name: "median - even count - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4.5]]}]}]}`, - }, - &Query{ - name: "median - odd count - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM intmany where time < '2000-01-01T00:01:10Z'`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, - }, - &Query{ - name: "distinct as call - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT(value) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, - }, - &Query{ - name: "distinct alt syntax - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT value FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, - }, - &Query{ - name: "distinct select tag - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT(host) FROM intmany`, - exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, - }, - &Query{ - name: "distinct alt select tag - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT host FROM intmany`, - exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, - }, - &Query{ - name: "count distinct - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT value) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, - }, - &Query{ - name: "count distinct as call - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT(value)) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, - }, - &Query{ - name: "count distinct select tag - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT host) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - }, - &Query{ - name: "count distinct as call select tag - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT host) FROM intmany`, - exp: `{"results":[{"series":[{"name":"intmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - }, - &Query{ - name: "aggregation with no interval - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT count(value) FROM intoverlap WHERE time = '2000-01-01 00:00:00'`, - exp: `{"results":[{"series":[{"name":"intoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - &Query{ - name: "sum - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM intoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`, - exp: 
`{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:10Z",30]]}]}]}`, - }, - &Query{ - name: "aggregation with a null field value - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM intoverlap GROUP BY region`, - exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, - }, - &Query{ - name: "multiple aggregations - int", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value), MEAN(value) FROM intoverlap GROUP BY region`, - exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",50,25]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",100,100]]}]}]}`, - }, - &Query{ - skip: true, - name: "multiple aggregations with division - int FIXME issue #2879", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value), mean(value), sum(value) / mean(value) as div FROM intoverlap GROUP BY region`, - exp: `{"results":[{"series":[{"name":"intoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean","div"],"values":[["1970-01-01T00:00:00Z",50,25,2]]},{"name":"intoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",100,100,1]]}]}]}`, - }, - - // float64 - &Query{ - name: "stddev with just one point - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT STDDEV(value) FROM floatsingle`, - exp: `{"results":[{"series":[{"name":"floatsingle","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, - }, - &Query{ - name: "large mean and stddev - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEAN(value), STDDEV(value) FROM floatmax`, - exp: `{"results":[{"series":[{"name":"floatmax","columns":["time","mean","stddev"],"values":[["1970-01-01T00:00:00Z",` + maxFloat64() + `,0]]}]}]}`, - }, - &Query{ - name: "mean and stddev - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEAN(value), STDDEV(value) FROM floatmany WHERE time >= '2000-01-01' AND time < '2000-01-01T00:02:00Z' GROUP BY time(10m)`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","mean","stddev"],"values":[["2000-01-01T00:00:00Z",5,2.138089935299395]]}]}]}`, - }, - &Query{ - name: "first - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT FIRST(value) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","first"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, - }, - &Query{ - name: "last - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT LAST(value) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","last"],"values":[["1970-01-01T00:00:00Z",9]]}]}]}`, - }, - &Query{ - name: "spread - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SPREAD(value) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","spread"],"values":[["1970-01-01T00:00:00Z",7]]}]}]}`, - }, - &Query{ - name: "median - even count - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM floatmany`, - exp: 
`{"results":[{"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4.5]]}]}]}`, - }, - &Query{ - name: "median - odd count - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM floatmany where time < '2000-01-01T00:01:10Z'`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",4]]}]}]}`, - }, - &Query{ - name: "distinct as call - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT(value) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, - }, - &Query{ - name: "distinct alt syntax - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT value FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","distinct"],"values":[["1970-01-01T00:00:00Z",[2,4,5,7,9]]]}]}]}`, - }, - &Query{ - name: "distinct select tag - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT(host) FROM floatmany`, - exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, - }, - &Query{ - name: "distinct alt select tag - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT DISTINCT host FROM floatmany`, - exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, - }, - &Query{ - name: "count distinct - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT value) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, - }, - &Query{ - name: "count distinct as call - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT(value)) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",5]]}]}]}`, - }, - &Query{ - name: "count distinct select tag - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT host) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - }, - &Query{ - name: "count distinct as call select tag - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(DISTINCT host) FROM floatmany`, - exp: `{"results":[{"series":[{"name":"floatmany","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - }, - &Query{ - name: "aggregation with no interval - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT count(value) FROM floatoverlap WHERE time = '2000-01-01 00:00:00'`, - exp: `{"results":[{"series":[{"name":"floatoverlap","columns":["time","count"],"values":[["2000-01-01T00:00:00Z",2]]}]}]}`, - }, - &Query{ - name: "sum - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM floatoverlap WHERE time >= '2000-01-01 00:00:05' AND time <= '2000-01-01T00:00:10Z' GROUP BY time(10s), region`, - exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["2000-01-01T00:00:00Z",null],["2000-01-01T00:00:10Z",30]]}]}]}`, - }, - &Query{ - name: "aggregation with a null field value - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value) FROM floatoverlap GROUP BY region`, - exp: 
`{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, - }, - &Query{ - name: "multiple aggregations - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT SUM(value), MEAN(value) FROM floatoverlap GROUP BY region`, - exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",50,25]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","sum","mean"],"values":[["1970-01-01T00:00:00Z",100,100]]}]}]}`, - }, - &Query{ - name: "multiple aggregations with division - float", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value) / mean(value) as div FROM floatoverlap GROUP BY region`, - exp: `{"results":[{"series":[{"name":"floatoverlap","tags":{"region":"us-east"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",2]]},{"name":"floatoverlap","tags":{"region":"us-west"},"columns":["time","div"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, - }, - - // strings - &Query{ - name: "STDDEV on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT STDDEV(value) FROM stringdata`, - exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","stddev"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, - }, - &Query{ - name: "MEAN on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEAN(value) FROM stringdata`, - exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",0]]}]}]}`, - }, - &Query{ - name: "MEDIAN on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT MEDIAN(value) FROM stringdata`, - exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","median"],"values":[["1970-01-01T00:00:00Z",null]]}]}]}`, - }, - &Query{ - name: "COUNT on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT COUNT(value) FROM stringdata`, - exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",2]]}]}]}`, - }, - &Query{ - name: "FIRST on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT FIRST(value) FROM stringdata`, - exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","first"],"values":[["1970-01-01T00:00:00Z","first"]]}]}]}`, - }, - &Query{ - name: "LAST on string data - string", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT LAST(value) FROM stringdata`, - exp: `{"results":[{"series":[{"name":"stringdata","columns":["time","last"],"values":[["1970-01-01T00:00:00Z","last"]]}]}]}`, - }, - - // general queries - &Query{ - name: "group by multiple dimensions", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value) FROM load GROUP BY region, host`, - exp: `{"results":[{"series":[{"name":"load","tags":{"host":"serverA","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",20]]},{"name":"load","tags":{"host":"serverB","region":"us-east"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",30]]},{"name":"load","tags":{"host":"serverC","region":"us-west"},"columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",100]]}]}]}`, - }, - &Query{ - name: "aggregation with WHERE and AND", - params: 
url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value) FROM cpu WHERE region='uk' AND host='serverZ'`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","sum"],"values":[["1970-01-01T00:00:00Z",50]]}]}]}`, - }, - - // Mathematics - &Query{ - name: "group by multiple dimensions", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value)*2 FROM load`, - exp: `{"results":[{"series":[{"name":"load","columns":["time",""],"values":[["1970-01-01T00:00:00Z",300]]}]}]}`, - }, - &Query{ - name: "group by multiple dimensions", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT sum(value)/2 FROM load`, - exp: `{"results":[{"series":[{"name":"load","columns":["time",""],"values":[["1970-01-01T00:00:00Z",75]]}]}]}`, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Write_Precision(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []struct { - write string - params url.Values - }{ - { - write: fmt.Sprintf("cpu_n0_precision value=1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").UnixNano()), - }, - { - write: fmt.Sprintf("cpu_n1_precision value=1.1 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").UnixNano()), - params: url.Values{"precision": []string{"n"}}, - }, - { - write: fmt.Sprintf("cpu_u_precision value=100 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Microsecond).UnixNano()/int64(time.Microsecond)), - params: url.Values{"precision": []string{"u"}}, - }, - { - write: fmt.Sprintf("cpu_ms_precision value=200 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Millisecond).UnixNano()/int64(time.Millisecond)), - params: url.Values{"precision": []string{"ms"}}, - }, - { - write: fmt.Sprintf("cpu_s_precision value=300 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Second).UnixNano()/int64(time.Second)), - params: url.Values{"precision": []string{"s"}}, - }, - { - write: fmt.Sprintf("cpu_m_precision value=400 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Minute).UnixNano()/int64(time.Minute)), - params: url.Values{"precision": []string{"m"}}, - }, - { - write: fmt.Sprintf("cpu_h_precision value=500 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T12:34:56.789012345Z").Truncate(time.Hour).UnixNano()/int64(time.Hour)), - params: url.Values{"precision": []string{"h"}}, - }, - } - - test := NewTest("db0", "rp0") - - test.addQueries([]*Query{ - &Query{ - name: "point with nanosecond precision time - no precision specified on write", - command: `SELECT * FROM cpu_n0_precision`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu_n0_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012345Z",1]]}]}]}`, - }, - &Query{ - name: "point with nanosecond precision time", - command: `SELECT * FROM cpu_n1_precision`, - 
params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu_n1_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012345Z",1.1]]}]}]}`, - }, - &Query{ - name: "point with microsecond precision time", - command: `SELECT * FROM cpu_u_precision`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu_u_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789012Z",100]]}]}]}`, - }, - &Query{ - name: "point with millisecond precision time", - command: `SELECT * FROM cpu_ms_precision`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu_ms_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56.789Z",200]]}]}]}`, - }, - &Query{ - name: "point with second precision time", - command: `SELECT * FROM cpu_s_precision`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu_s_precision","columns":["time","value"],"values":[["2000-01-01T12:34:56Z",300]]}]}]}`, - }, - &Query{ - name: "point with minute precision time", - command: `SELECT * FROM cpu_m_precision`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu_m_precision","columns":["time","value"],"values":[["2000-01-01T12:34:00Z",400]]}]}]}`, - }, - &Query{ - name: "point with hour precision time", - command: `SELECT * FROM cpu_h_precision`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"cpu_h_precision","columns":["time","value"],"values":[["2000-01-01T12:00:00Z",500]]}]}]}`, - }, - }...) - - // we are doing writes that require parameter changes, so we are fighting the test harness a little to make this happen properly - for _, w := range writes { - test.write = w.write - test.params = w.params - test.initialized = false - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Wildcards(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`wildcard,region=us-east value=10 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-east valx=20 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-east value=30,valx=40 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - - fmt.Sprintf(`wgroup,region=us-east value=10.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`wgroup,region=us-east value=20.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`wgroup,region=us-west value=30.0 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "wildcard", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM wildcard`, - exp: 
`{"results":[{"series":[{"name":"wildcard","columns":["time","region","value","valx"],"values":[["2000-01-01T00:00:00Z","us-east",10,null],["2000-01-01T00:00:10Z","us-east",null,20],["2000-01-01T00:00:20Z","us-east",30,40]]}]}]}`, - }, - &Query{ - name: "wildcard with group by", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM wildcard GROUP BY *`, - exp: `{"results":[{"series":[{"name":"wildcard","tags":{"region":"us-east"},"columns":["time","value","valx"],"values":[["2000-01-01T00:00:00Z",10,null],["2000-01-01T00:00:10Z",null,20],["2000-01-01T00:00:20Z",30,40]]}]}]}`, - }, - &Query{ - name: "GROUP BY queries", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(value) FROM wgroup GROUP BY *`, - exp: `{"results":[{"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["1970-01-01T00:00:00Z",30]]}]}]}`, - }, - &Query{ - name: "GROUP BY queries with time", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT mean(value) FROM wgroup WHERE time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T00:01:00Z' GROUP BY *,TIME(1m)`, - exp: `{"results":[{"series":[{"name":"wgroup","tags":{"region":"us-east"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",15]]},{"name":"wgroup","tags":{"region":"us-west"},"columns":["time","mean"],"values":[["2000-01-01T00:00:00Z",30]]}]}]}`, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_WildcardExpansion(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`wildcard,region=us-east,host=A value=10,cpu=80 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-east,host=B value=20,cpu=90 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-west,host=B value=30,cpu=70 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - fmt.Sprintf(`wildcard,region=us-east,host=A value=40,cpu=60 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:30Z").UnixNano()), - - fmt.Sprintf(`dupnames,region=us-east,day=1 value=10,day=3i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`dupnames,region=us-east,day=2 value=20,day=2i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:10Z").UnixNano()), - fmt.Sprintf(`dupnames,region=us-west,day=3 value=30,day=1i %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:20Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "wildcard", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM wildcard`, - exp: 
`{"results":[{"series":[{"name":"wildcard","columns":["time","cpu","host","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, - }, - &Query{ - name: "no wildcard in select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT cpu, host, region, value FROM wildcard`, - exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","cpu","host","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, - }, - &Query{ - name: "no wildcard in select, preserve column order", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT host, cpu, region, value FROM wildcard`, - exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","host","cpu","region","value"],"values":[["2000-01-01T00:00:00Z","A",80,"us-east",10],["2000-01-01T00:00:10Z","B",90,"us-east",20],["2000-01-01T00:00:20Z","B",70,"us-west",30],["2000-01-01T00:00:30Z","A",60,"us-east",40]]}]}]}`, - }, - - &Query{ - name: "only tags, no fields", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT host, region FROM wildcard`, - exp: `{"results":[{"error":"statement must have at least one field in select clause"}]}`, - }, - - &Query{ - name: "no wildcard with alias", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT cpu as c, host as h, region, value FROM wildcard`, - exp: `{"results":[{"series":[{"name":"wildcard","columns":["time","c","h","region","value"],"values":[["2000-01-01T00:00:00Z",80,"A","us-east",10],["2000-01-01T00:00:10Z",90,"B","us-east",20],["2000-01-01T00:00:20Z",70,"B","us-west",30],["2000-01-01T00:00:30Z",60,"A","us-east",40]]}]}]}`, - }, - &Query{ - name: "duplicate tag and field name, always favor field over tag", - command: `SELECT * FROM dupnames`, - params: url.Values{"db": []string{"db0"}}, - exp: `{"results":[{"series":[{"name":"dupnames","columns":["time","day","region","value"],"values":[["2000-01-01T00:00:00Z",3,"us-east",10],["2000-01-01T00:00:10Z",2,"us-east",20],["2000-01-01T00:00:20Z",1,"us-west",30]]}]}]}`, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_AcrossShardsAndFields(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu load=100 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu load=200 %d`, mustParseTime(time.RFC3339Nano, "2010-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`cpu core=4 %d`, mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "two results for cpu", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT load FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2000-01-01T00:00:00Z",100],["2010-01-01T00:00:00Z",200]]}]}]}`, - }, - &Query{ - name: "two results for cpu, multi-select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT core,load FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core","load"],"values":[["2000-01-01T00:00:00Z",null,100],["2010-01-01T00:00:00Z",null,200],["2015-01-01T00:00:00Z",4,null]]}]}]}`, - }, - &Query{ - name: "two results for cpu, wildcard select", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT * FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core","load"],"values":[["2000-01-01T00:00:00Z",null,100],["2010-01-01T00:00:00Z",null,200],["2015-01-01T00:00:00Z",4,null]]}]}]}`, - }, - &Query{ - name: "one result for core", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT core FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2015-01-01T00:00:00Z",4]]}]}]}`, - }, - &Query{ - name: "empty result set from non-existent field", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT foo FROM cpu`, - exp: `{"results":[{}]}`, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Where_Fields(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu alert_id="alert",tenant_id="tenant",_cust="johnson brothers" %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - fmt.Sprintf(`cpu alert_id="alert",tenant_id="tenant",_cust="johnson brothers" %d`, mustParseTime(time.RFC3339Nano, "2015-02-28T01:03:36.703820946Z").UnixNano()), - - fmt.Sprintf(`cpu load=100.0,core=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`cpu load=80.0,core=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:01:02Z").UnixNano()), - - fmt.Sprintf(`clicks local=true %d`, mustParseTime(time.RFC3339Nano, "2014-11-10T23:00:01Z").UnixNano()), - fmt.Sprintf(`clicks local=false %d`, mustParseTime(time.RFC3339Nano, "2014-11-10T23:00:02Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - // non type specific - &Query{ - name: "missing measurement with group by", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT load from missing group by *`, - exp: `{"results":[{}]}`, - }, - - // string - &Query{ - name: "single string field", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id FROM cpu WHERE alert_id='alert'`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert"]]}]}]}`, - }, - &Query{ - name: "string AND query, all fields in SELECT", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id,tenant_id,_cust FROM cpu WHERE alert_id='alert' AND tenant_id='tenant'`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id","tenant_id","_cust"],"values":[["2015-02-28T01:03:36.703820946Z","alert","tenant","johnson brothers"]]}]}]}`, - }, - &Query{ - name: "string AND query, all fields in SELECT, one in parenthesis", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id,tenant_id FROM cpu WHERE alert_id='alert' AND (tenant_id='tenant')`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id","tenant_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert","tenant"]]}]}]}`, - }, - &Query{ - name: "string underscored field", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id FROM cpu WHERE _cust='johnson brothers'`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","alert_id"],"values":[["2015-02-28T01:03:36.703820946Z","alert"]]}]}]}`, - }, - &Query{ - name: "string no match", - params: url.Values{"db": []string{"db0"}}, - command: `SELECT alert_id FROM cpu WHERE _cust='acme'`, - exp: `{"results":[{}]}`, - }, - - // float64 - &Query{ - name: "float64 GT no match", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load > 100`, - exp: `{"results":[{}]}`, - }, - 
&Query{ - name: "float64 GTE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load >= 100`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, - }, - &Query{ - name: "float64 EQ match upper bound", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load = 100`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, - }, - &Query{ - name: "float64 LTE match two", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load <= 100`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100],["2009-11-10T23:01:02Z",80]]}]}]}`, - }, - &Query{ - name: "float64 GT match one", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load > 99`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:00:02Z",100]]}]}]}`, - }, - &Query{ - name: "float64 EQ no match", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load = 99`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "float64 LT match one", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load < 99`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:01:02Z",80]]}]}]}`, - }, - &Query{ - name: "float64 LT no match", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load < 80`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "float64 NE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select load from cpu where load != 100`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","load"],"values":[["2009-11-10T23:01:02Z",80]]}]}]}`, - }, - - // int64 - &Query{ - name: "int64 GT no match", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core > 4`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "int64 GTE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core >= 4`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, - }, - &Query{ - name: "int64 EQ match upper bound", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core = 4`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, - }, - &Query{ - name: "int64 LTE match two ", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core <= 4`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4],["2009-11-10T23:01:02Z",2]]}]}]}`, - }, - &Query{ - name: "int64 GT match one", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core > 3`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:00:02Z",4]]}]}]}`, - }, - &Query{ - name: "int64 EQ no match", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core = 3`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "int64 LT match one", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core < 3`, - exp: 
`{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:01:02Z",2]]}]}]}`, - }, - &Query{ - name: "int64 LT no match", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core < 2`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: "int64 NE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select core from cpu where core != 4`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","core"],"values":[["2009-11-10T23:01:02Z",2]]}]}]}`, - }, - - // bool - &Query{ - name: "bool EQ match true", - params: url.Values{"db": []string{"db0"}}, - command: `select local from clicks where local = true`, - exp: `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:01Z",true]]}]}]}`, - }, - &Query{ - name: "bool EQ match false", - params: url.Values{"db": []string{"db0"}}, - command: `select local from clicks where local = false`, - exp: `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:02Z",false]]}]}]}`, - }, - - &Query{ - name: "bool NE match one", - params: url.Values{"db": []string{"db0"}}, - command: `select local from clicks where local != true`, - exp: `{"results":[{"series":[{"name":"clicks","columns":["time","local"],"values":[["2014-11-10T23:00:02Z",false]]}]}]}`, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Where_With_Tags(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`where_events,tennant=paul foo="bar" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`where_events,tennant=paul foo="baz" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), - fmt.Sprintf(`where_events,tennant=paul foo="bat" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), - fmt.Sprintf(`where_events,tennant=todd foo="bar" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), - fmt.Sprintf(`where_events,tennant=david foo="bap" %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "tag field and time", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from where_events where (tennant = 'paul' OR tennant = 'david') AND time > 1s AND (foo = 'bar' OR foo = 'baz' OR foo = 'bap')`, - exp: `{"results":[{"series":[{"name":"where_events","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z","bar"],["2009-11-10T23:00:03Z","baz"],["2009-11-10T23:00:06Z","bap"]]}]}]}`, - }, - &Query{ - name: "where on tag that should be double quoted but isn't", - params: url.Values{"db": []string{"db0"}}, - command: `show series where data-center = 'foo'`, - exp: `{"results":[{"error":"invalid expression: data - center = 'foo'"}]}`, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_LimitAndOffset(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`limited,tennant=paul foo=2 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`limited,tennant=paul foo=3 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), - fmt.Sprintf(`limited,tennant=paul foo=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), - fmt.Sprintf(`limited,tennant=todd foo=5 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "limit on points", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from "limited" LIMIT 2`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3]]}]}]}`, - }, - &Query{ - name: "limit higher than the number of data points", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from "limited" LIMIT 20`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4],["2009-11-10T23:00:05Z",5]]}]}]}`, - }, - &Query{ - name: "limit and offset", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from "limited" LIMIT 2 OFFSET 1`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4]]}]}]}`, - }, - &Query{ - name: "limit + offset equal to total number of points", - params: url.Values{"db": []string{"db0"}}, - command: `select foo from "limited" LIMIT 3 OFFSET 3`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`, - }, - &Query{ - name: "limit - offset higher than number of points", - command: `select foo from "limited" LIMIT 2 OFFSET 20`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","foo"]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "limit on points with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "limit higher than the number of data points with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 20`, - exp: 
`{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2],["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4],["2009-11-10T23:00:05Z",5]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "limit and offset with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2 OFFSET 1`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:03Z",3],["2009-11-10T23:00:04Z",4]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "limit + offset equal to the number of points with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 3 OFFSET 3`, - exp: `{"results":[{"series":[{"name":"limited","columns":["time","mean"],"values":[["2009-11-10T23:00:05Z",5]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "limit - offset higher than number of points with group by time", - command: `select mean(foo) from "limited" WHERE time >= '2009-11-10T23:00:02Z' AND time < '2009-11-10T23:00:06Z' GROUP BY TIME(1s) LIMIT 2 OFFSET 20`, - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "limit higher than the number of data points should error", - command: `select mean(foo) from "limited" where time > '2000-01-01T00:00:00Z' group by time(1s), * fill(0) limit 2147483647`, - exp: `{"results":[{"error":"too many points in the group by interval. maybe you forgot to specify a where time clause?"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "limit1 higher than MaxGroupBy but the number of data points is less than MaxGroupBy", - command: `select mean(foo) from "limited" where time >= '2009-11-10T23:00:02Z' and time < '2009-11-10T23:00:03Z' group by time(1s), * fill(0) limit 2147483647`, - exp: `{"results":[{"series":[{"name":"limited","tags":{"tennant":"paul"},"columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",2]]},{"name":"limited","tags":{"tennant":"todd"},"columns":["time","mean"],"values":[["2009-11-10T23:00:02Z",0]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Fill(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`fills val=3 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`fills val=5 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), - fmt.Sprintf(`fills val=4 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), - fmt.Sprintf(`fills val=10 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:16Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "fill with value", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(1)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill with value, WHERE all values match condition", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val < 50 group by time(5s) FILL(1)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill with value, WHERE no values match condition", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val > 50 group by time(5s) FILL(1)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",1],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",1],["2009-11-10T23:00:15Z",1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill with previous", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(previous)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",4],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill with none, i.e. 
clear out nulls", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) FILL(none)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill defaults to null", - command: `select mean(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","mean"],"values":[["2009-11-10T23:00:00Z",4],["2009-11-10T23:00:05Z",4],["2009-11-10T23:00:10Z",null],["2009-11-10T23:00:15Z",10]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill with count aggregate defaults to null", - command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",null],["2009-11-10T23:00:15Z",1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill with count aggregate defaults to null, no values match", - command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' and val > 100 group by time(5s)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",null],["2009-11-10T23:00:05Z",null],["2009-11-10T23:00:10Z",null],["2009-11-10T23:00:15Z",null]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "fill with count aggregate specific value", - command: `select count(val) from fills where time >= '2009-11-10T23:00:00Z' and time < '2009-11-10T23:00:20Z' group by time(5s) fill(1234)`, - exp: `{"results":[{"series":[{"name":"fills","columns":["time","count"],"values":[["2009-11-10T23:00:00Z",2],["2009-11-10T23:00:05Z",1],["2009-11-10T23:00:10Z",1234],["2009-11-10T23:00:15Z",1]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_Chunk(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := make([]string, 10001) // 10,000 is the default chunking size, even when no chunking requested. 
- expectedValues := make([]string, len(writes)) - for i := 0; i < len(writes); i++ { - writes[i] = fmt.Sprintf(`cpu value=%d %d`, i, time.Unix(0, int64(i)).UnixNano()) - expectedValues[i] = fmt.Sprintf(`["%s",%d]`, time.Unix(0, int64(i)).UTC().Format(time.RFC3339Nano), i) - } - expected := fmt.Sprintf(`{"results":[{"series":[{"name":"cpu","columns":["time","value"],"values":[%s]}]}]}`, strings.Join(expectedValues, ",")) - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "SELECT all values, no chunking", - command: `SELECT value FROM cpu`, - exp: expected, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } - -} - -func TestServer_Query_DropAndRecreateMeasurement(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=serverA,region=uswest val=23.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf(`memory,host=serverB,region=uswest val=33.2 %d`, mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:01Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "Drop Measurement, series tags preserved tests", - command: `SHOW MEASUREMENTS`, - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["memory"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "show series", - command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=serverA,region=uswest","serverA","uswest"]]},{"name":"memory","columns":["_key","host","region"],"values":[["memory,host=serverB,region=uswest","serverB","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "ensure we can query for memory with both tags", - command: `SELECT * FROM memory where region='uswest' and host='serverB' GROUP BY *`, - exp: `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "drop measurement cpu", - command: `DROP MEASUREMENT cpu`, - exp: `{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "verify measurements", - command: `SHOW MEASUREMENTS`, - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["memory"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "verify series", - command: `SHOW SERIES`, - exp: `{"results":[{"series":[{"name":"memory","columns":["_key","host","region"],"values":[["memory,host=serverB,region=uswest","serverB","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "verify cpu measurement is gone", - command: `SELECT * FROM cpu`, - exp: 
`{"results":[{}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "verify selecting from a tag 'host' still works", - command: `SELECT * FROM memory where host='serverB' GROUP BY *`, - exp: `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "verify selecting from a tag 'region' still works", - command: `SELECT * FROM memory where region='uswest' GROUP BY *`, - exp: `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "verify selecting from a tag 'host' and 'region' still works", - command: `SELECT * FROM memory where region='uswest' and host='serverB' GROUP BY *`, - exp: `{"results":[{"series":[{"name":"memory","tags":{"host":"serverB","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:01Z",33.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "Drop non-existant measurement", - command: `DROP MEASUREMENT doesntexist`, - exp: `{"results":[{"error":"measurement not found: doesntexist"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - // Test that re-inserting the measurement works fine. - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } - - test = NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "verify measurements after recreation", - command: `SHOW MEASUREMENTS`, - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["memory"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "verify cpu measurement has been re-inserted", - command: `SELECT * FROM cpu GROUP BY *`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"serverA","region":"uswest"},"columns":["time","val"],"values":[["2000-01-01T00:00:00Z",23.2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_ShowSeries(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:01Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:02Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:03Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:04Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:05Z").UnixNano()), - fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:06Z").UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:07Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: `show series`, - command: "SHOW SERIES", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]},{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server02,region=useast","server02","useast"],["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show series from measurement`, - command: "SHOW SERIES FROM cpu", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show series from regular expression`, - command: "SHOW SERIES FROM /[cg]pu/", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01","server01",""],["cpu,host=server01,region=uswest","server01","uswest"],["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server02,region=useast","server02","useast"],["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show series with where tag`, - 
command: "SHOW SERIES WHERE region = 'uswest'", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01,region=uswest","server01","uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show series where tag matches regular expression`, - command: "SHOW SERIES WHERE region =~ /ca.*/", - exp: `{"results":[{"series":[{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show series`, - command: "SHOW SERIES WHERE host !~ /server0[12]/", - exp: `{"results":[{"series":[{"name":"disk","columns":["_key","host","region"],"values":[["disk,host=server03,region=caeast","server03","caeast"]]},{"name":"gpu","columns":["_key","host","region"],"values":[["gpu,host=server03,region=caeast","server03","caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show series with from and where`, - command: "SHOW SERIES FROM cpu WHERE region = 'useast'", - exp: `{"results":[{"series":[{"name":"cpu","columns":["_key","host","region"],"values":[["cpu,host=server01,region=useast","server01","useast"],["cpu,host=server02,region=useast","server02","useast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_ShowMeasurements(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`other,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: `show measurements with limit 2`, - command: "SHOW MEASUREMENTS LIMIT 2", - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"],["gpu"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show measurements where tag matches regular expression`, - 
command: "SHOW MEASUREMENTS WHERE region =~ /ca.*/", - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["gpu"],["other"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show measurements where tag does not match a regular expression`, - command: "SHOW MEASUREMENTS WHERE region !~ /ca.*/", - exp: `{"results":[{"series":[{"name":"measurements","columns":["name"],"values":[["cpu"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_ShowTagKeys(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server02,region=useast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast value=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: `show tag keys`, - command: "SHOW TAG KEYS", - exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"disk","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "show tag keys from", - command: "SHOW TAG KEYS FROM cpu", - exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "show tag keys from regex", - command: "SHOW TAG KEYS FROM /[cg]pu/", - exp: `{"results":[{"series":[{"name":"cpu","columns":["tagKey"],"values":[["host"],["region"]]},{"name":"gpu","columns":["tagKey"],"values":[["host"],["region"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "show tag keys measurement not found", - command: "SHOW TAG KEYS FROM bad", - exp: `{"results":[{"error":"measurement not found: bad"}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: "show tag values with key", - command: "SHOW TAG VALUES WITH KEY = host", - exp: 
`{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"],["server02"],["server03"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show tag values with key and where`, - command: `SHOW TAG VALUES FROM cpu WITH KEY = host WHERE region = 'uswest'`, - exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show tag values with key and where matches regular expression`, - command: `SHOW TAG VALUES WITH KEY = host WHERE region =~ /ca.*/`, - exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server03"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show tag values with key and where does not match regular expression`, - command: `SHOW TAG VALUES WITH KEY = region WHERE host !~ /server0[12]/`, - exp: `{"results":[{"series":[{"name":"regionTagValues","columns":["region"],"values":[["caeast"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show tag values with key in and where`, - command: `SHOW TAG VALUES FROM cpu WITH KEY IN (host, region) WHERE region = 'uswest'`, - exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"]]},{"name":"regionTagValues","columns":["region"],"values":[["uswest"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show tag values with key and measurement matches regular expression`, - command: `SHOW TAG VALUES FROM /[cg]pu/ WITH KEY = host`, - exp: `{"results":[{"series":[{"name":"hostTagValues","columns":["host"],"values":[["server01"],["server02"],["server03"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...)
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_ShowFieldKeys(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - writes := []string{ - fmt.Sprintf(`cpu,host=server01 field1=100 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=uswest field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server01,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`cpu,host=server02,region=useast field1=200,field2=300,field3=400 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server01,region=useast field4=200,field5=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`gpu,host=server03,region=caeast field6=200,field7=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - fmt.Sprintf(`disk,host=server03,region=caeast field8=200,field9=300 %d`, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:00Z").UnixNano()), - } - - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: `show field keys`, - command: `SHOW FIELD KEYS`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]},{"name":"disk","columns":["fieldKey"],"values":[["field8"],["field9"]]},{"name":"gpu","columns":["fieldKey"],"values":[["field4"],["field5"],["field6"],["field7"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show field keys from measurement`, - command: `SHOW FIELD KEYS FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - &Query{ - name: `show field keys measurement with regex`, - command: `SHOW FIELD KEYS FROM /[cg]pu/`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["fieldKey"],"values":[["field1"],["field2"],["field3"]]},{"name":"gpu","columns":["fieldKey"],"values":[["field4"],["field5"],["field6"],["field7"]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) 
- - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -func TestServer_Query_CreateContinuousQuery(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - test := NewTest("db0", "rp0") - - test.addQueries([]*Query{ - &Query{ - name: "create continuous query", - command: `CREATE CONTINUOUS QUERY "my.query" ON db0 BEGIN SELECT count(value) INTO measure1 FROM myseries GROUP BY time(10m) END`, - exp: `{"results":[{}]}`, - }, - &Query{ - name: `show continuous queries`, - command: `SHOW CONTINUOUS QUERIES`, - exp: `{"results":[{"series":[{"name":"db0","columns":["name","query"],"values":[["my.query","CREATE CONTINUOUS QUERY \"my.query\" ON db0 BEGIN SELECT count(value) INTO \"db0\".\"rp0\".measure1 FROM \"db0\".\"rp0\".myseries GROUP BY time(10m) END"]]}]}]}`, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} - -// Tests that a known CQ query with concurrent writes does not deadlock the server -func TestServer_ContinuousQuery_Deadlock(t *testing.T) { - - // Skip until #3517 & #3522 are merged - t.Skip("Skipping CQ deadlock test") - if testing.Short() { - t.Skip("skipping CQ deadlock test") - } - t.Parallel() - s := OpenServer(NewConfig(), "") - defer func() { - s.Close() - // Nil the server so our deadlock detector goroutine can determine if we completed writes - // without timing out - s.Server = nil - }() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - test := NewTest("db0", "rp0") - - test.addQueries([]*Query{ - &Query{ - name: "create continuous query", - command: `CREATE CONTINUOUS QUERY "my.query" ON db0 BEGIN SELECT sum(visits) as visits INTO test_1m FROM myseries GROUP BY time(1m), host END`, - exp: `{"results":[{}]}`, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } - - // Deadlock detector. If the deadlock is fixed, this test should complete all the writes in ~2.5 seconds (with artificial delays - // added). After 10 seconds, if the server has not been closed then we hit the deadlock bug. - iterations := 0 - go func(s *Server) { - <-time.After(10 * time.Second) - - // If the server is not nil then the test is still running and stuck. We panic to avoid - // having the whole test suite hang indefinitely.
- if s.Server != nil { - panic("possible deadlock. writes did not complete in time") - } - }(s) - - for { - - // After the second write, if the deadlock exists, we'll get a write timeout and - // all subsequent writes will time out - if iterations > 5 { - break - } - writes := []string{} - for i := 0; i < 1000; i++ { - writes = append(writes, fmt.Sprintf(`myseries,host=host-%d visits=1i`, i)) - } - write := strings.Join(writes, "\n") - - if _, err := s.Write(test.db, test.rp, write, test.params); err != nil { - t.Fatal(err) - } - iterations++ - time.Sleep(500 * time.Millisecond) - } -} - -func TestServer_Query_EvilIdentifiers(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 0)); err != nil { - t.Fatal(err) - } - if err := s.MetaStore.SetDefaultRetentionPolicy("db0", "rp0"); err != nil { - t.Fatal(err) - } - - test := NewTest("db0", "rp0") - test.write = fmt.Sprintf("cpu select=1,in-bytes=2 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()) - - test.addQueries([]*Query{ - &Query{ - name: `query evil identifiers`, - command: `SELECT "select", "in-bytes" FROM cpu`, - exp: `{"results":[{"series":[{"name":"cpu","columns":["time","select","in-bytes"],"values":[["2000-01-01T00:00:00Z",1,2]]}]}]}`, - params: url.Values{"db": []string{"db0"}}, - }, - }...) - - for i, query := range test.queries { - if i == 0 { - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - } - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.md deleted file mode 100644 index 2b6883de7..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/cmd/influxd/run/server_test.md +++ /dev/null @@ -1,150 +0,0 @@ -# Server Integration Tests - -Currently, the file `server_test.go` has integration tests for single node scenarios. -At some point we'll need to add cluster tests, and may add them in a different file, or -rename `server_test.go` to `server_single_node_test.go` or something like that. - -## What is in a test? - -Each test is broken apart effectively into the following areas: - -- Write sample data -- Use cases for a table-driven test that include a command (typically a query) and an expected result. - -When each test runs it does the following: - -- init: determines if there are any writes and if so, writes them to the in-memory database -- queries: iterate through each query, executing the command, and comparing the results to the expected result. - -## Idempotent - Allows for parallel tests - -Each test should be `idempotent`, meaning that its data will not be affected by other tests or by use cases within the table tests themselves. -This allows for parallel testing, keeping the test suite's total execution time very low. - -### Basic sample test - -```go -// Ensure the server can have a database with multiple measurements.
-func TestServer_Query_Multiple_Measurements(t *testing.T) { - t.Parallel() - s := OpenServer(NewConfig(), "") - defer s.Close() - - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) - } - - // Make sure we do writes for measurements that will span across shards - writes := []string{ - fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), - } - test := NewTest("db0", "rp0") - test.write = strings.Join(writes, "\n") - - test.addQueries([]*Query{ - &Query{ - name: "measurement in one shard but not another shouldn't panic server", - command: `SELECT host,value FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`, - }, - }...) - - if err := test.init(s); err != nil { - t.Fatalf("test init failed: %s", err) - } - - for _, query := range test.queries { - if query.skip { - t.Logf("SKIP:: %s", query.name) - continue - } - if err := query.Execute(s); err != nil { - t.Error(query.Error(err)) - } else if !query.success() { - t.Error(query.failureMessage()) - } - } -} -``` - -Let's break this down: - -In this test, we first tell it to run in parallel with the `t.Parallel()` call. - -We then open a new server with: - -```go -s := OpenServer(NewConfig(), "") -defer s.Close() -``` - -If needed, we create a database and default retention policy. This is usually needed -when inserting and querying data. This is not needed if you are testing commands like `CREATE DATABASE`, `SHOW DIAGNOSTICS`, etc. - -```go -if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicyInfo("rp0", 1, 1*time.Hour)); err != nil { - t.Fatal(err) -} -``` - -Next, set up the write data you need: - -```go -writes := []string{ - fmt.Sprintf("cpu,host=server01 value=100,core=4 %d", mustParseTime(time.RFC3339Nano, "2000-01-01T00:00:00Z").UnixNano()), - fmt.Sprintf("cpu1,host=server02 value=50,core=2 %d", mustParseTime(time.RFC3339Nano, "2015-01-01T00:00:00Z").UnixNano()), -} -``` -Create a new test with the database and retention policy: - -```go -test := NewTest("db0", "rp0") -``` - -Send in the writes: -```go -test.write = strings.Join(writes, "\n") -``` - -Add some queries (the second one is mocked out to show how to add more than one): - -```go -test.addQueries([]*Query{ - &Query{ - name: "measurement in one shard but not another shouldn't panic server", - command: `SELECT host,value FROM db0.rp0.cpu`, - exp: `{"results":[{"series":[{"name":"cpu","tags":{"host":"server01"},"columns":["time","value"],"values":[["2000-01-01T00:00:00Z",100]]}]}]}`, - }, - &Query{ - name: "another test here...", - command: `Some query command`, - exp: `the expected results`, - }, -}...) -``` - -The rest of the code is boilerplate execution code. It is purposefully not refactored out to a helper -to make sure the test failure reports the proper lines for debugging purposes. - -#### Running the tests - -To run the tests: - -```sh -go test ./cmd/influxd/run -parallel 500 -timeout 10s -``` - -#### Running a specific test - -```sh -go test ./cmd/influxd/run -parallel 500 -timeout 10s -run TestServer_Query_Fill -``` - -#### Verbose feedback - -By default, all logs are silenced when testing. 
If you pass in the `-v` flag, the test suite becomes verbose and enables all logging in the system. - -```sh -go test ./cmd/influxd/run -parallel 500 -timeout 10s -run TestServer_Query_Fill -v -``` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/diagnostics.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/diagnostics.go deleted file mode 100644 index c1e02565b..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/diagnostics.go +++ /dev/null @@ -1,143 +0,0 @@ -package influxdb - -import ( - "os" - "runtime" - "time" - - "github.com/influxdb/influxdb/influxql" -) - -// GoDiagnostics captures basic information about the runtime. -type GoDiagnostics struct { - GoMaxProcs int - NumGoroutine int - Version string -} - -// NewGoDiagnostics returns a GoDiagnostics object. -func NewGoDiagnostics() *GoDiagnostics { - return &GoDiagnostics{ - GoMaxProcs: runtime.GOMAXPROCS(0), - NumGoroutine: runtime.NumGoroutine(), - Version: runtime.Version(), - } -} - -// AsRow returns the GoDiagnostics object as an InfluxQL row. -func (g *GoDiagnostics) AsRow(measurement string, tags map[string]string) *influxql.Row { - return &influxql.Row{ - Name: measurement, - Columns: []string{"time", "goMaxProcs", "numGoRoutine", "version"}, - Tags: tags, - Values: [][]interface{}{[]interface{}{time.Now().UTC(), - g.GoMaxProcs, g.NumGoroutine, g.Version}}, - } -} - -// SystemDiagnostics captures basic machine data. -type SystemDiagnostics struct { - Hostname string - PID int - OS string - Arch string - NumCPU int -} - -// NewSystemDiagnostics returns a SystemDiagnostics object. -func NewSystemDiagnostics() *SystemDiagnostics { - hostname, err := os.Hostname() - if err != nil { - hostname = "unknown" - } - - return &SystemDiagnostics{ - Hostname: hostname, - PID: os.Getpid(), - OS: runtime.GOOS, - Arch: runtime.GOARCH, - NumCPU: runtime.NumCPU(), - } -} - -// AsRow returns the SystemDiagnostics object as an InfluxQL row. -func (s *SystemDiagnostics) AsRow(measurement string, tags map[string]string) *influxql.Row { - return &influxql.Row{ - Name: measurement, - Columns: []string{"time", "hostname", "pid", "os", "arch", "numCPU"}, - Tags: tags, - Values: [][]interface{}{[]interface{}{time.Now().UTC(), - s.Hostname, s.PID, s.OS, s.Arch, s.NumCPU}}, - } -} - -// MemoryDiagnostics captures Go memory stats. -type MemoryDiagnostics struct { - Alloc int64 - TotalAlloc int64 - Sys int64 - Lookups int64 - Mallocs int64 - Frees int64 - HeapAlloc int64 - HeapSys int64 - HeapIdle int64 - HeapInUse int64 - HeapReleased int64 - HeapObjects int64 - PauseTotalNs int64 - NumGC int64 -} - -// NewMemoryDiagnostics returns a MemoryDiagnostics object. -func NewMemoryDiagnostics() *MemoryDiagnostics { - var m runtime.MemStats - runtime.ReadMemStats(&m) - - return &MemoryDiagnostics{ - Alloc: int64(m.Alloc), - TotalAlloc: int64(m.TotalAlloc), - Sys: int64(m.Sys), - Lookups: int64(m.Lookups), - Mallocs: int64(m.Mallocs), - Frees: int64(m.Frees), - HeapAlloc: int64(m.HeapAlloc), - HeapSys: int64(m.HeapSys), - HeapIdle: int64(m.HeapIdle), - HeapInUse: int64(m.HeapInuse), - HeapReleased: int64(m.HeapReleased), - HeapObjects: int64(m.HeapObjects), - PauseTotalNs: int64(m.PauseTotalNs), - NumGC: int64(m.NumGC), - } -} - -// AsRow returns the MemoryDiagnostics object as an InfluxQL row.
-func (m *MemoryDiagnostics) AsRow(measurement string, tags map[string]string) *influxql.Row { - return &influxql.Row{ - Name: measurement, - Columns: []string{"time", "alloc", "totalAlloc", "sys", "lookups", "mallocs", "frees", "heapAlloc", - "heapSys", "heapIdle", "heapInUse", "heapReleased", "heapObjects", "pauseTotalNs", "numGC"}, - Tags: tags, - Values: [][]interface{}{[]interface{}{time.Now().UTC(), - m.Alloc, m.TotalAlloc, m.Sys, m.Lookups, m.Mallocs, m.Frees, m.HeapAlloc, - m.HeapSys, m.HeapIdle, m.HeapInUse, m.HeapReleased, m.HeapObjects, m.PauseTotalNs, m.NumGC}}, - } -} - -// BuildDiagnostics captures basic build version information. -type BuildDiagnostics struct { - Version string - CommitHash string -} - -// AsRow returns the BuildDiagnostics object as an InfluxQL row. -func (b *BuildDiagnostics) AsRow(measurement string, tags map[string]string) *influxql.Row { - return &influxql.Row{ - Name: measurement, - Columns: []string{"time", "version", "commitHash"}, - Tags: tags, - Values: [][]interface{}{[]interface{}{time.Now().UTC(), - b.Version, b.CommitHash}}, - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/errors.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/errors.go deleted file mode 100644 index c18f2c449..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/errors.go +++ /dev/null @@ -1,78 +0,0 @@ -package influxdb - -import ( - "encoding/json" - "errors" - "fmt" - "runtime" - "strings" -) - -var ( - // ErrFieldsRequired is returned when a point does not have any fields. - ErrFieldsRequired = errors.New("fields required") - - // ErrFieldTypeConflict is returned when a new field already exists with a different type. - ErrFieldTypeConflict = errors.New("field type conflict") -) - -func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) } - -func ErrMeasurementNotFound(name string) error { return fmt.Errorf("measurement not found: %s", name) } - -func Errorf(format string, a ...interface{}) (err error) { - if _, file, line, ok := runtime.Caller(2); ok { - a = append(a, file, line) - err = fmt.Errorf(format+" (%s:%d)", a...) - } else { - err = fmt.Errorf(format, a...) - } - return -} - -// IsClientError indicates whether an error is a known client error. -func IsClientError(err error) bool { - if err == nil { - return false - } - - if err == ErrFieldsRequired { - return true - } - if err == ErrFieldTypeConflict { - return true - } - - if strings.Contains(err.Error(), ErrFieldTypeConflict.Error()) { - return true - } - - return false -} - -// mustMarshalJSON encodes a value to JSON. -// This will panic if an error occurs. This should only be used internally when -// an invalid marshal will cause corruption and a panic is appropriate. -func mustMarshalJSON(v interface{}) []byte { - b, err := json.Marshal(v) - if err != nil { - panic("marshal: " + err.Error()) - } - return b -} - -// mustUnmarshalJSON decodes a value from JSON. -// This will panic if an error occurs. This should only be used internally when -// an invalid unmarshal will cause corruption and a panic is appropriate. -func mustUnmarshalJSON(b []byte, v interface{}) { - if err := json.Unmarshal(b, v); err != nil { - panic("unmarshal: " + err.Error()) - } -} - -// assert will panic with a given formatted message if the given condition is false.
-func assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assert failed: "+msg, v...)) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/.rvmrc b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/.rvmrc deleted file mode 100644 index a9c1a9ca3..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/.rvmrc +++ /dev/null @@ -1 +0,0 @@ -rvm use ruby-2.1.0@burn-in --create diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile deleted file mode 100644 index b1816e8b6..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile +++ /dev/null @@ -1,4 +0,0 @@ -source 'https://rubygems.org' - -gem "colorize" -gem "influxdb" diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile.lock b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile.lock deleted file mode 100644 index 9e721c3a7..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/Gemfile.lock +++ /dev/null @@ -1,14 +0,0 @@ -GEM - remote: https://rubygems.org/ - specs: - colorize (0.6.0) - influxdb (0.0.16) - json - json (1.8.1) - -PLATFORMS - ruby - -DEPENDENCIES - colorize - influxdb diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/burn-in.rb b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/burn-in.rb deleted file mode 100644 index 1d44bc2c0..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/burn-in.rb +++ /dev/null @@ -1,79 +0,0 @@ -require "influxdb" -require "colorize" -require "benchmark" - -require_relative "log" -require_relative "random_gaussian" - -BATCH_SIZE = 10_000 - -Log.info "Starting burn-in suite" -master = InfluxDB::Client.new -master.delete_database("burn-in") rescue nil -master.create_database("burn-in") -master.create_database_user("burn-in", "user", "pass") - -master.database = "burn-in" -# master.query "select * from test1 into test2;" -# master.query "select count(value) from test1 group by time(1m) into test2;" - -influxdb = InfluxDB::Client.new "burn-in", username: "user", password: "pass" - -Log.success "Connected to server #{influxdb.host}:#{influxdb.port}" - -Log.log "Creating RandomGaussian(500, 25)" -gaussian = RandomGaussian.new(500, 25) -point_count = 0 - -while true - Log.log "Generating 10,000 points.." - points = [] - BATCH_SIZE.times do |n| - points << {value: gaussian.rand.to_i.abs} - end - point_count += points.length - - Log.info "Sending points to server.." - begin - st = Time.now - foo = influxdb.write_point("test1", points) - et = Time.now - Log.log foo.inspect - Log.log "#{et-st} seconds elapsed" - Log.success "Write successful." 
- rescue => e - Log.failure "Write failed:" - Log.log e - end - sleep 0.5 - - Log.info "Checking regular points" - st = Time.now - response = influxdb.query("select count(value) from test1;") - et = Time.now - - Log.log "#{et-st} seconds elapsed" - - response_count = response["test1"].first["count"] - if point_count == response_count - Log.success "Point counts match: #{point_count} == #{response_count}" - else - Log.failure "Point counts don't match: #{point_count} != #{response_count}" - end - - # Log.info "Checking continuous query points for test2" - # st = Time.now - # response = influxdb.query("select count(value) from test2;") - # et = Time.now - - # Log.log "#{et-st} seconds elapsed" - - # response_count = response["test2"].first["count"] - # if point_count == response_count - # Log.success "Point counts match: #{point_count} == #{response_count}" - # else - # Log.failure "Point counts don't match: #{point_count} != #{response_count}" - # end -end - - diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/log.rb b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/log.rb deleted file mode 100644 index 0f70d7633..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/log.rb +++ /dev/null @@ -1,23 +0,0 @@ -module Log - def self.info(msg) - print Time.now.strftime("%r") + " | " - puts msg.to_s.colorize(:yellow) - end - - def self.success(msg) - print Time.now.strftime("%r") + " | " - puts msg.to_s.colorize(:green) - end - - def self.failure(msg) - print Time.now.strftime("%r") + " | " - puts msg.to_s.colorize(:red) - end - - def self.log(msg) - print Time.now.strftime("%r") + " | " - puts msg.to_s - end -end - - diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_gaussian.rb b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_gaussian.rb deleted file mode 100644 index 51d6c3c04..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_gaussian.rb +++ /dev/null @@ -1,31 +0,0 @@ -class RandomGaussian - def initialize(mean, stddev, rand_helper = lambda { Kernel.rand }) - @rand_helper = rand_helper - @mean = mean - @stddev = stddev - @valid = false - @next = 0 - end - - def rand - if @valid then - @valid = false - return @next - else - @valid = true - x, y = self.class.gaussian(@mean, @stddev, @rand_helper) - @next = y - return x - end - end - - private - def self.gaussian(mean, stddev, rand) - theta = 2 * Math::PI * rand.call - rho = Math.sqrt(-2 * Math.log(1 - rand.call)) - scale = stddev * rho - x = mean + scale * Math.cos(theta) - y = mean + scale * Math.sin(theta) - return x, y - end -end diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_points.rb b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_points.rb deleted file mode 100644 index 93bc8314f..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/burn-in/random_points.rb +++ /dev/null @@ -1,29 +0,0 @@ -require "influxdb" - -ONE_WEEK_IN_SECONDS = 7*24*60*60 -NUM_POINTS = 10_000 -BATCHES = 100 - -master = InfluxDB::Client.new -master.delete_database("ctx") rescue nil -master.create_database("ctx") - -influxdb = InfluxDB::Client.new "ctx" -influxdb.time_precision = "s" - -names = ["foo", "bar", "baz", "quu", "qux"] - -st = Time.now -BATCHES.times do |m| - points = [] - - puts "Writing #{NUM_POINTS} points, time ##{m}.." 
- NUM_POINTS.times do |n| - timestamp = Time.now.to_i - rand(ONE_WEEK_IN_SECONDS) - points << {value: names.sample, time: timestamp} - end - - influxdb.write_point("ct1", points) -end -puts st -puts Time.now diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/config.sample.toml b/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/config.sample.toml deleted file mode 100644 index 9614277ab..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/etc/config.sample.toml +++ /dev/null @@ -1,246 +0,0 @@ -### Welcome to the InfluxDB configuration file. - -# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com -# The data includes raft id (random 8 bytes), os, arch, version, and metadata. -# We don't track ip addresses of servers reporting. This is only used -# to track the number of instances running and the versions, which -# is very helpful for us. -# Change this option to true to disable reporting. -reporting-disabled = false - -### -### [meta] -### -### Controls the parameters for the Raft consensus group that stores metadata -### about the InfluxDB cluster. -### - -[meta] - dir = "/var/opt/influxdb/meta" - hostname = "localhost" - bind-address = ":8088" - retention-autocreate = true - election-timeout = "1s" - heartbeat-timeout = "1s" - leader-lease-timeout = "500ms" - commit-timeout = "50ms" - -### -### [data] -### -### Controls where the actual shard data for InfluxDB lives and how it is -### flushed from the WAL. "dir" may need to be changed to a suitable place -### for your system, but the WAL settings are an advanced configuration. The -### defaults should work for most systems. -### - -[data] - dir = "/var/opt/influxdb/data" - - # The following WAL settings are for the b1 storage engine used in 0.9.2. They won't - # apply to any new shards created after upgrading to a version > 0.9.3. - max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB. - wal-flush-interval = "10m" # Maximum time data can sit in WAL before a flush. - wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed. - - # These are the WAL settings for the storage engine >= 0.9.3 - wal-dir = "/var/opt/influxdb/wal" - wal-enable-logging = true - - # When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to - # flush to the index - # wal-ready-series-size = 25600 - - # Flush and compact a partition once this ratio of series are over the ready size - # wal-compaction-threshold = 0.6 - - # Force a flush and compaction if any series in a partition gets above this size in bytes - # wal-max-series-size = 2097152 - - # Force a flush of all series and full compaction if there have been no writes in this - # amount of time. This is useful for ensuring that shards that are cold for writes don't - # keep a bunch of data cached in memory and in the WAL. - # wal-flush-cold-interval = "10m" - - # Force a partition to flush its largest series if it reaches this approximate size in - # bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory. - # The more memory you have, the bigger this can be. - # wal-partition-size-threshold = 20971520 - -### -### [cluster] -### -### Controls non-Raft cluster behavior, which generally includes how data is -### shared across shards. -### - -[cluster] - shard-writer-timeout = "5s" # The time within which a shard must respond to write. 
- write-timeout = "5s" # The time within which a write operation must complete on the cluster. - -### -### [retention] -### -### Controls the enforcement of retention policies for evicting old data. -### - -[retention] - enabled = true - check-interval = "10m" - -### -### [admin] -### -### Controls the availability of the built-in, web-based admin interface. If HTTPS is -### enabled for the admin interface, HTTPS must also be enabled on the [http] service. -### - -[admin] - enabled = true - bind-address = ":8083" - https-enabled = false - https-certificate = "/etc/ssl/influxdb.pem" - -### -### [http] -### -### Controls how the HTTP endpoints are configured. These are the primary -### mechanism for getting data into and out of InfluxDB. -### - -[http] - enabled = true - bind-address = ":8086" - auth-enabled = false - log-enabled = true - write-tracing = false - pprof-enabled = false - https-enabled = false - https-certificate = "/etc/ssl/influxdb.pem" - -### -### [[graphite]] -### -### Controls one or many listeners for Graphite data. -### - -[[graphite]] - enabled = false - # bind-address = ":2003" - # protocol = "tcp" - # consistency-level = "one" - # name-separator = "." - - # These next lines control how batching works. You should have this enabled - # otherwise you could get dropped metrics or poor performance. Batching - # will buffer points in memory if you have many coming in. - - # batch-size = 1000 # will flush if this many points get buffered - # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit - - ## "name-schema" configures tag names for parsing the metric name from graphite protocol; - ## separated by `name-separator`. - ## The "measurement" tag is special and the corresponding field will become - ## the name of the metric. - ## e.g. "type.host.measurement.device" will parse "server.localhost.cpu.cpu0" as - ## { - ## measurement: "cpu", - ## tags: { - ## "type": "server", - ## "host": "localhost", - ## "device": "cpu0" - ## } - ## } - # name-schema = "type.host.measurement.device" - - ## If set to true, when the input metric name has more fields than `name-schema` specified, - ## the extra fields will be ignored. - ## Otherwise an error will be logged and the metric rejected. - # ignore-unnamed = true - -### -### [collectd] -### -### Controls the listener for collectd data. -### - -[collectd] - enabled = false - # bind-address = "" - # database = "" - # typesdb = "" - - # These next lines control how batching works. You should have this enabled - # otherwise you could get dropped metrics or poor performance. Batching - # will buffer points in memory if you have many coming in. - - # batch-size = 1000 # will flush if this many points get buffered - # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit - -### -### [opentsdb] -### -### Controls the listener for OpenTSDB data. -### - -[opentsdb] - enabled = false - # bind-address = "" - # database = "" - # retention-policy = "" - -### -### [[udp]] -### -### Controls the listeners for InfluxDB line protocol data via UDP. -### - -[[udp]] - enabled = false - # bind-address = "" - # database = "" - - # These next lines control how batching works. You should have this enabled - # otherwise you could get dropped metrics or poor performance. Batching - # will buffer points in memory if you have many coming in. - - # batch-size = 1000 # will flush if this many points get buffered - # batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
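-
-  # As a quick sanity check of a UDP listener (hypothetical values: enabled = true
-  # and bind-address = ":8089"), a single line-protocol point could be sent with netcat:
-  #
-  #   echo "cpu,host=server01 value=0.5" | nc -u -w1 localhost 8089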
-### -### [monitoring] -### - -[monitoring] - enabled = true - write-interval = "24h" - -### -### [continuous_queries] -### -### Controls how continuous queries are run within InfluxDB. -### - -[continuous_queries] - log-enabled = true - enabled = true - recompute-previous-n = 2 - recompute-no-older-than = "10m" - compute-runs-per-interval = 10 - compute-no-more-than = "2m" - -### -### [hinted-handoff] -### -### Controls the hinted handoff feature, which allows nodes to temporarily -### store queued data when one node of a cluster is down for a short period -### of time. -### - -[hinted-handoff] - enabled = true - dir = "/var/opt/influxdb/hh" - max-size = 1073741824 - max-age = "168h" - retry-rate-limit = 0 - retry-interval = "1s" diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/README.md deleted file mode 100644 index 272fedf6c..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/README.md +++ /dev/null @@ -1,186 +0,0 @@ -# Import/Export - -## Exporting from 0.8.9 - -Version `0.8.9` of InfluxDB adds support to export your data to a format that can be imported into `0.9.3` and later. - -### Design - -`0.8.9` exports raw data to a flat file that includes two sections, `DDL` and `DML`. You can choose to export them independently (see below). - -The `DDL` section contains the SQL commands to create databases and retention policies. The `DML` section is [line protocol](https://github.com/influxdb/influxdb/blob/master/tsdb/README.md) and can be directly posted to the [http endpoint](https://influxdb.com/docs/v0.9/guides/writing_data.html) in `0.9`. Remember that batching is important and we don't recommend batch sizes over 5k. - -You need to specify a database and shard group when you export. - -To list out your shards, use the following http endpoint: - -`/cluster/shard_spaces` - -example: -```sh -http://username:password@localhost:8086/cluster/shard_spaces -``` - -Then, to export a database with the name "metrics" and a shard space with the name "default", issue the following curl command: - -```sh -curl -o export http://username:password@localhost:8086/export/metrics/default -``` - -Compression is supported, and will result in a significantly smaller file size. - -Use the following command for compression: -```sh -curl -o export.gz --compressed http://username:password@localhost:8086/export/metrics/default -``` - -You can also export just the `DDL` with this option: - -```sh -curl -o export.ddl http://username:password@localhost:8086/export/metrics/default?l=ddl -``` - -Or just the `DML` with this option: - -```sh -curl -o export.dml.gz --compressed http://username:password@localhost:8086/export/metrics/default?l=dml -``` - -### Assumptions - -- Series name mapping follows these [guidelines](https://influxdb.com/docs/v0.8/advanced_topics/schema_design.html) (see the example below) -- Database name will map directly from `0.8` to `0.9` -- Shard Spaces map to Retention Policies -- Shard Space Duration is ignored, as in `0.9` we determine shard size automatically -- Regex is used to match the correct series names and only exports that data for the database -- Duration becomes the new Retention Policy duration - -- Users are not migrated due to inability to get passwords. Anyone using users will need to manually set these back up in `0.9`.
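-
-For example (a hypothetical point, with a made-up value and timestamp), a `0.8` series named `az.us-west-1.host.serverA.cpu` would appear in the exported `DML` as a line-protocol point like:
-
-```
-cpu,az=us-west-1,host=serverA value=0.64 1439856000000000000
-```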
-### Upgrade Recommendations - -It's recommended that you upgrade to `0.9.3` first and have all your writes going there. Then, on the `0.8.X` instances, upgrade to `0.8.9`. - -It is important that when exporting you change your config to allow for the http endpoints not timing out. To do so, make this change in your config: - -```toml -# Configure the http api -[api] -read-timeout = "0s" -``` - -### Exceptions - -If a series can't be exported to tags based on the guidelines mentioned above, -we will insert the entire series name as the measurement name. You can either -allow that to import into the new InfluxDB instance, or you can do your own -data massage on it prior to importing it. - -For example, if you have the following series name: - -``` -metric.disk.c.host.server01.single -``` - -It will export exactly that as the measurement name and no tags: - -``` -metric.disk.c.host.server01.single -``` - -### Export Metrics - -When you export, you will now get comments inline in the `DML`: - -`# Found 999 Series for export` - -As well as count totals for each series exported: - -`# Series FOO - Points Exported: 999` - -With a total at the bottom: - -`# Points Exported: 999` - -You can grep the file that was exported at the end to get all the export metrics: - -`cat myexport | grep Exported` - -## Importing - -Version `0.9.3` of InfluxDB adds support to import your data from version `0.8.9`. - -## Caveats - -For the export/import to work, all prerequisites have to be met. For export, all series names in `0.8` should be in the following format: - -``` -<tag name>.<tag value>.<tag name>.<tag value>.<measurement> -``` -for example: -``` -az.us-west-1.host.serverA.cpu -``` -or any number of tags -``` -building.2.temperature -``` - -Additionally, the fields need to have a consistent type (all float64, int64, etc.) for every write in `0.8`. Otherwise they have the potential to fail writes in the import. -See below for more information. - -## Running the import command - - To import via the CLI, you can specify the following command: - - ```sh - influx -import -path=metrics-default.gz -compressed - ``` - - If the file is not compressed you can issue it without the `-compressed` flag: - - ```sh - influx -import -path=metrics-default - ``` - - To redirect failed import lines to another file, run this command: - - ```sh - influx -import -path=metrics-default.gz -compressed > failures - ``` - - The import will use the line protocol in batches of 5,000 lines when sending data to the server. - -### Throttling the import - - If you need to throttle the import so the database has time to ingest, you can use the `-pps` flag. This will limit the points per second that will be sent to the server. - - ```sh - influx -import -path=metrics-default.gz -compressed -pps 50000 > failures - ``` - - This states that you don't want more than 50,000 points per second to write to the database. Due to the processing that is taking place however, you will likely never get exactly 50,000 pps, more like 35,000 pps, etc. - -## Understanding the results of the import - -During the import, a status message will write out for every 100,000 points imported and report stats on the progress of the import: - -``` -2015/08/21 14:48:01 Processed 3100000 lines. Time elapsed: 56.740578415s.
Points per second (PPS): 54634 -``` - - The batch will give some basic stats when finished: - - ```sh - 2015/07/29 23:15:20 Processed 2 commands - 2015/07/29 23:15:20 Processed 70207923 inserts - 2015/07/29 23:15:20 Failed 29785000 inserts - ``` - - Most inserts fail due to the following types of error: - - ```sh - 2015/07/29 22:18:28 error writing batch: write failed: field type conflict: input field "value" on measurement "metric" is type float64, already exists as type integer - ``` - - This is due to the fact that in `0.8` a field could get created and saved as int or float types for independent writes. In `0.9` the field has to have a consistent type. diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/v8/importer.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/v8/importer.go deleted file mode 100644 index 5095868f3..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/importer/v8/importer.go +++ /dev/null @@ -1,236 +0,0 @@ -package v8 - -import ( - "bufio" - "compress/gzip" - "fmt" - "io" - "log" - "net/url" - "os" - "strings" - "time" - - "github.com/influxdb/influxdb/client" -) - -const batchSize = 5000 - -// Config is the config used to initialize an Importer -type Config struct { - Username string - Password string - URL url.URL - Precision string - WriteConsistency string - Path string - Version string - Compressed bool - PPS int -} - -// NewConfig returns an initialized *Config -func NewConfig() *Config { - return &Config{} -} - -// Importer is the importer used for importing 0.8 data -type Importer struct { - client *client.Client - database string - retentionPolicy string - config *Config - batch []string - totalInserts int - failedInserts int - totalCommands int - throttlePointsWritten int - lastWrite time.Time - throttle *time.Ticker -} - -// NewImporter will return an initialized Importer struct -func NewImporter(config *Config) *Importer { - return &Importer{ - config: config, - batch: make([]string, 0, batchSize), - } -} - -// Import processes the specified file in the Config and writes the data to the databases in chunks specified by batchSize -func (i *Importer) Import() error { - // Create a client and try to connect - config := client.NewConfig() - config.URL = i.config.URL - config.Username = i.config.Username - config.Password = i.config.Password - config.UserAgent = fmt.Sprintf("influxDB importer/%s", i.config.Version) - cl, err := client.NewClient(config) - if err != nil { - return fmt.Errorf("could not create client %s", err) - } - i.client = cl - if _, _, e := i.client.Ping(); e != nil { - return fmt.Errorf("failed to connect to %s\n", i.client.Addr()) - } - - // Validate args - if i.config.Path == "" { - return fmt.Errorf("file argument required") - } - - defer func() { - if i.totalInserts > 0 { - log.Printf("Processed %d commands\n", i.totalCommands) - log.Printf("Processed %d inserts\n", i.totalInserts) - log.Printf("Failed %d inserts\n", i.failedInserts) - } - }() - - // Open the file - f, err := os.Open(i.config.Path) - if err != nil { - return err - } - defer f.Close() - - var r io.Reader - - // If gzipped, wrap in a gzip reader - if i.config.Compressed { - gr, err := gzip.NewReader(f) - if err != nil { - return err - } - defer gr.Close() - // Set the reader to the gzip reader - r = gr - } else { - // Standard text file so our reader can just be the file - r = f - } - - // Get our reader - scanner := bufio.NewScanner(r) - - // Process the DDL - i.processDDL(scanner) - - // Set up our throttle
channel. Since there is effectively no other activity at this point - // the smaller resolution gets us much closer to the requested PPS - i.throttle = time.NewTicker(time.Microsecond) - defer i.throttle.Stop() - - // Prime the last write - i.lastWrite = time.Now() - - // Process the DML - i.processDML(scanner) - - // Check if we had any errors scanning the file - if err := scanner.Err(); err != nil { - return fmt.Errorf("reading standard input: %s", err) - } - - return nil -} - -func (i *Importer) processDDL(scanner *bufio.Scanner) { - for scanner.Scan() { - line := scanner.Text() - // If we find the DML token, we are done with DDL - if strings.HasPrefix(line, "# DML") { - return - } - if strings.HasPrefix(line, "#") { - continue - } - i.queryExecutor(line) - } -} - -func (i *Importer) processDML(scanner *bufio.Scanner) { - start := time.Now() - for scanner.Scan() { - line := scanner.Text() - if strings.HasPrefix(line, "# CONTEXT-DATABASE:") { - i.database = strings.TrimSpace(strings.Split(line, ":")[1]) - } - if strings.HasPrefix(line, "# CONTEXT-RETENTION-POLICY:") { - i.retentionPolicy = strings.TrimSpace(strings.Split(line, ":")[1]) - } - if strings.HasPrefix(line, "#") { - continue - } - i.batchAccumulator(line, start) - } -} - -func (i *Importer) execute(command string) { - response, err := i.client.Query(client.Query{Command: command, Database: i.database}) - if err != nil { - log.Printf("error: %s\n", err) - return - } - if err := response.Error(); err != nil { - log.Printf("error: %s\n", response.Error()) - } -} - -func (i *Importer) queryExecutor(command string) { - i.totalCommands++ - i.execute(command) -} - -func (i *Importer) batchAccumulator(line string, start time.Time) { - i.batch = append(i.batch, line) - if len(i.batch) == batchSize { - if e := i.batchWrite(); e != nil { - log.Println("error writing batch: ", e) - // Output failed lines to STDOUT so users can capture lines that failed to import - fmt.Println(strings.Join(i.batch, "\n")) - i.failedInserts += len(i.batch) - } else { - i.totalInserts += len(i.batch) - } - i.batch = i.batch[:0] - // Give some status feedback every 100000 lines processed - processed := i.totalInserts + i.failedInserts - if processed%100000 == 0 { - since := time.Since(start) - pps := float64(processed) / since.Seconds() - log.Printf("Processed %d lines. Time elapsed: %s. 
Points per second (PPS): %d", processed, since.String(), int64(pps)) - } - } -} - -func (i *Importer) batchWrite() error { - // Accumulate the batch size to see how many points we have written this second - i.throttlePointsWritten += len(i.batch) - - // Find out when we last wrote data - since := time.Since(i.lastWrite) - - // Check to see if we've exceeded our points per second for the current timeframe - var currentPPS int - if since.Seconds() > 0 { - currentPPS = int(float64(i.throttlePointsWritten) / since.Seconds()) - } else { - currentPPS = i.throttlePointsWritten - } - - // If our currentPPS is greater than the PPS specified, then we wait and retry - if int(currentPPS) > i.config.PPS && i.config.PPS != 0 { - // Wait for the next tick - <-i.throttle.C - - // Decrement the batch size back out as it is going to get called again - i.throttlePointsWritten -= len(i.batch) - return i.batchWrite() - } - - _, e := i.client.WriteLineProtocol(strings.Join(i.batch, "\n"), i.database, i.retentionPolicy, i.config.Precision, i.config.WriteConsistency) - i.throttlePointsWritten = 0 - i.lastWrite = time.Now() - return e -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/nightly.sh b/Godeps/_workspace/src/github.com/influxdb/influxdb/nightly.sh deleted file mode 100644 index 9052201a2..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/nightly.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -REPO_DIR=`mktemp -d` -echo "Using $REPO_DIR for all work..." - -cd $REPO_DIR -export GOPATH=`pwd` -mkdir -p $GOPATH/src/github.com/influxdb -cd $GOPATH/src/github.com/influxdb -git clone https://github.com/influxdb/influxdb.git - -cd $GOPATH/src/github.com/influxdb/influxdb -NIGHTLY_BUILD=true ./package.sh 0.9.3-nightly-`git log --pretty=format:'%h' -n 1` -rm -rf $REPO_DIR diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/package.sh b/Godeps/_workspace/src/github.com/influxdb/influxdb/package.sh deleted file mode 100644 index c86a8b3eb..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/package.sh +++ /dev/null @@ -1,409 +0,0 @@ -#!/usr/bin/env bash - -########################################################################### -# Packaging script which creates Debian and RPM packages. It optionally -# tags the repo with the given version. -# -# Requirements: GOPATH must be set. 'fpm' must be on the path, and the AWS -# CLI tools must also be installed. -# -# https://github.com/jordansissel/fpm -# http://aws.amazon.com/cli/ -# -# Packaging process: to package a build, simply execute: -# -# package.sh <version> -# -# where <version> is the desired version. If generation of a Debian and RPM -# package is successful, the script will offer to tag the repo using the -# supplied version string. -# -# AWS upload: the script will also offer to upload the packages to S3. If -# this option is selected, the credentials should be present in the file -# ~/aws.conf. The contents should be of the form: -# -# [default] -# aws_access_key_id=<access key id> -# aws_secret_access_key=<secret access key> -# region = us-east-1 -# -# Trim the leading spaces when creating the file. The script will exit if -# S3 upload is requested, but this file does not exist.
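-#
-# Example invocation (with a hypothetical version string):
-#
-#   ./package.sh 0.9.3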
- -[ -z $DEBUG ] || set -x - -AWS_FILE=~/aws.conf - -INSTALL_ROOT_DIR=/opt/influxdb -INFLUXDB_LOG_DIR=/var/log/influxdb -INFLUXDB_DATA_DIR=/var/opt/influxdb -CONFIG_ROOT_DIR=/etc/opt/influxdb - -SAMPLE_CONFIGURATION=etc/config.sample.toml -INITD_SCRIPT=scripts/init.sh - -TMP_WORK_DIR=`mktemp -d` -POST_INSTALL_PATH=`mktemp` -ARCH=`uname -i` -LICENSE=MIT -URL=influxdb.com -MAINTAINER=support@influxdb.com -VENDOR=Influxdb -DESCRIPTION="Distributed time-series database" - -# Allow path to FPM to be set by environment variables. Some execution contexts -# like cron don't have PATH set correctly to pick it up. -if [ -z "$FPM" ]; then - FPM=`which fpm` -fi - -GO_VERSION="go1.4.2" -GOPATH_INSTALL= -BINS=( - influxd - influx - ) - -########################################################################### -# Helper functions. - -# usage prints simple usage information. -usage() { - echo -e "$0 [<version>] [-h]\n" - cleanup_exit $1 -} - -# cleanup_exit removes all resources created during the process and exits with -# the supplied returned code. -cleanup_exit() { - rm -r $TMP_WORK_DIR - rm $POST_INSTALL_PATH - exit $1 -} - -# current_branch echoes the current git branch. -current_branch() { - echo `git rev-parse --abbrev-ref HEAD` -} - -# check_gopath sanity checks the value of the GOPATH env variable, and determines -# the path where build artifacts are installed. GOPATH may be a colon-delimited -# list of directories. -check_gopath() { - [ -z "$GOPATH" ] && echo "GOPATH is not set." && cleanup_exit 1 - GOPATH_INSTALL=`echo $GOPATH | cut -d ':' -f 1` - [ ! -d "$GOPATH_INSTALL" ] && echo "GOPATH_INSTALL is not a directory." && cleanup_exit 1 - echo "GOPATH ($GOPATH) looks sane, using $GOPATH_INSTALL for installation." -} - -check_gvm() { - if [ -n "$GOPATH" ]; then - existing_gopath=$GOPATH - fi - - source $HOME/.gvm/scripts/gvm - which gvm - if [ $? -ne 0 ]; then - echo "gvm not found -- aborting." - cleanup_exit $1 - fi - gvm use $GO_VERSION - if [ $? -ne 0 ]; then - echo "gvm cannot find Go version $GO_VERSION -- aborting." - cleanup_exit $1 - fi - - # Keep any existing GOPATH set. - if [ -n "$existing_gopath" ]; then - GOPATH=$existing_gopath - fi -} - -# check_clean_tree ensures that no source file is locally modified. -check_clean_tree() { - modified=$(git ls-files --modified | wc -l) - if [ $modified -ne 0 ]; then - echo "The source tree is not clean -- aborting." - cleanup_exit 1 - fi - echo "Git tree is clean." -} - -# update_tree ensures the tree is in-sync with the repo. -update_tree() { - git pull origin $TARGET_BRANCH - if [ $? -ne 0 ]; then - echo "Failed to pull latest code -- aborting." - cleanup_exit 1 - fi - git fetch --tags - if [ $? -ne 0 ]; then - echo "Failed to fetch tags -- aborting." - cleanup_exit 1 - fi - echo "Git tree updated successfully." -} - -# check_tag_exists checks if the existing release already exists in the tags. -check_tag_exists () { - version=$1 - git tag | grep -q "^v$version$" - if [ $? -eq 0 ]; then - echo "Proposed version $version already exists as a tag -- aborting." - cleanup_exit 1 - fi -} - -# make_dir_tree creates the directory structure within the packages. -make_dir_tree() { - work_dir=$1 - version=$2 - mkdir -p $work_dir/$INSTALL_ROOT_DIR/versions/$version/scripts - if [ $? -ne 0 ]; then - echo "Failed to create installation directory -- aborting." - cleanup_exit 1 - fi - mkdir -p $work_dir/$CONFIG_ROOT_DIR - if [ $? -ne 0 ]; then - echo "Failed to create configuration directory -- aborting." - cleanup_exit 1 - fi -} - - -# do_build builds the code.
The version and commit must be passed in. -do_build() { - for b in ${BINS[*]}; do - rm -f $GOPATH_INSTALL/bin/$b - done - go get -u -f -d ./... - if [ $? -ne 0 ]; then - echo "WARNING: failed to 'go get' packages." - fi - - git checkout $TARGET_BRANCH # go get switches to master, so ensure we're back. - version=$1 - commit=`git rev-parse HEAD` - branch=`current_branch` - if [ $? -ne 0 ]; then - echo "Unable to retrieve current commit -- aborting" - cleanup_exit 1 - fi - - go install -a -ldflags="-X main.version $version -X main.branch $branch -X main.commit $commit" ./... - if [ $? -ne 0 ]; then - echo "Build failed, unable to create package -- aborting" - cleanup_exit 1 - fi - echo "Build completed successfully." -} - -# generate_postinstall_script creates the post-install script for the -# package. It must be passed the version. -generate_postinstall_script() { - version=$1 - cat <<EOF >$POST_INSTALL_PATH -rm -f $INSTALL_ROOT_DIR/influxd -rm -f $INSTALL_ROOT_DIR/influx -rm -f $INSTALL_ROOT_DIR/init.sh -ln -s $INSTALL_ROOT_DIR/versions/$version/influxd $INSTALL_ROOT_DIR/influxd -ln -s $INSTALL_ROOT_DIR/versions/$version/influx $INSTALL_ROOT_DIR/influx -ln -s $INSTALL_ROOT_DIR/versions/$version/scripts/init.sh $INSTALL_ROOT_DIR/init.sh - -rm -f /etc/init.d/influxdb -ln -sfn $INSTALL_ROOT_DIR/init.sh /etc/init.d/influxdb -chmod +x /etc/init.d/influxdb -if which update-rc.d > /dev/null 2>&1 ; then - update-rc.d -f influxdb remove - update-rc.d influxdb defaults -else - chkconfig --add influxdb -fi - -if ! id influxdb >/dev/null 2>&1; then - useradd --system -U -M influxdb -fi -chown -R -L influxdb:influxdb $INSTALL_ROOT_DIR -chmod -R a+rX $INSTALL_ROOT_DIR - -mkdir -p $INFLUXDB_LOG_DIR -chown -R -L influxdb:influxdb $INFLUXDB_LOG_DIR -mkdir -p $INFLUXDB_DATA_DIR -chown -R -L influxdb:influxdb $INFLUXDB_DATA_DIR -EOF - echo "Post-install script created successfully at $POST_INSTALL_PATH" -} - -########################################################################### -# Start the packaging process. - -if [ $# -ne 1 ]; then - usage 1 -elif [ $1 == "-h" ]; then - usage 0 -else - VERSION=$1 - VERSION_UNDERSCORED=`echo "$VERSION" | tr - _` -fi - -echo -e "\nStarting package process...\n" - -# Ensure the current branch is correct. -TARGET_BRANCH=`current_branch` -if [ -z "$NIGHTLY_BUILD" ]; then -echo -n "Current branch is $TARGET_BRANCH. Start packaging this branch? [Y/n] " - read response - response=`echo $response | tr 'A-Z' 'a-z'` - if [ "x$response" == "xn" ]; then - echo "Packaging aborted." - cleanup_exit 1 - fi -fi - -check_gvm -check_gopath -if [ -z "$NIGHTLY_BUILD" ]; then - check_clean_tree - update_tree - check_tag_exists $VERSION -fi - -do_build $VERSION -make_dir_tree $TMP_WORK_DIR $VERSION - -########################################################################### -# Copy the assets to the installation directories. - -for b in ${BINS[*]}; do - cp $GOPATH_INSTALL/bin/$b $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION - if [ $? -ne 0 ]; then - echo "Failed to copy binaries to packaging directory -- aborting." - cleanup_exit 1 - fi -done -echo "${BINS[*]} copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION" - -cp $INITD_SCRIPT $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts -if [ $? -ne 0 ]; then - echo "Failed to copy init.d script to packaging directory -- aborting." - cleanup_exit 1 -fi -echo "$INITD_SCRIPT copied to $TMP_WORK_DIR/$INSTALL_ROOT_DIR/versions/$VERSION/scripts" - -cp $SAMPLE_CONFIGURATION $TMP_WORK_DIR/$CONFIG_ROOT_DIR/influxdb.conf -if [ $?
-ne 0 ]; then - echo "Failed to copy $SAMPLE_CONFIGURATION to packaging directory -- aborting." - cleanup_exit 1 -fi - -generate_postinstall_script $VERSION - -########################################################################### -# Create the actual packages. - -if [ -z "$NIGHTLY_BUILD" ]; then - echo -n "Commence creation of $ARCH packages, version $VERSION? [Y/n] " - read response - response=`echo $response | tr 'A-Z' 'a-z'` - if [ "x$response" == "xn" ]; then - echo "Packaging aborted." - cleanup_exit 1 - fi -fi - -if [ $ARCH == "i386" ]; then - rpm_package=influxdb-${VERSION}-1.i686.rpm # RPM packages use 1 for default package release. - debian_package=influxdb_${VERSION}_i686.deb - deb_args="-a i686" - rpm_args="setarch i686" -elif [ $ARCH == "arm" ]; then - rpm_package=influxdb-${VERSION}-1.armel.rpm - debian_package=influxdb_${VERSION}_armel.deb -else - rpm_package=influxdb-${VERSION}-1.x86_64.rpm - debian_package=influxdb_${VERSION}_amd64.deb -fi - -COMMON_FPM_ARGS="-C $TMP_WORK_DIR --vendor $VENDOR --url $URL --license $LICENSE --maintainer $MAINTAINER --after-install $POST_INSTALL_PATH --name influxdb --version $VERSION --config-files $CONFIG_ROOT_DIR ." -$rpm_args $FPM -s dir -t rpm --description "$DESCRIPTION" $COMMON_FPM_ARGS -if [ $? -ne 0 ]; then - echo "Failed to create RPM package -- aborting." - cleanup_exit 1 -fi -echo "RPM package created successfully." - -$FPM -s dir -t deb $deb_args --description "$DESCRIPTION" $COMMON_FPM_ARGS -if [ $? -ne 0 ]; then - echo "Failed to create Debian package -- aborting." - cleanup_exit 1 -fi -echo "Debian package created successfully." - -$FPM -s dir -t tar --prefix influxdb_${VERSION}_${ARCH} -p influxdb_${VERSION}_${ARCH}.tar.gz --description "$DESCRIPTION" $COMMON_FPM_ARGS -if [ $? -ne 0 ]; then - echo "Failed to create Tar package -- aborting." - cleanup_exit 1 -fi -echo "Tar package created successfully." - -########################################################################### -# Offer to tag the repo. - -if [ -z "$NIGHTLY_BUILD" ]; then - echo -n "Tag source tree with v$VERSION and push to repo? [y/N] " - read response - response=`echo $response | tr 'A-Z' 'a-z'` - if [ "x$response" == "xy" ]; then - echo "Creating tag v$VERSION and pushing to repo" - git tag v$VERSION - if [ $? -ne 0 ]; then - echo "Failed to create tag v$VERSION -- aborting" - cleanup_exit 1 - fi - git push origin v$VERSION - if [ $? -ne 0 ]; then - echo "Failed to push tag v$VERSION to repo -- aborting" - cleanup_exit 1 - fi - else - echo "Not creating tag v$VERSION." - fi -fi - -########################################################################### -# Offer to publish the packages. - -if [ -z "$NIGHTLY_BUILD" ]; then - echo -n "Publish packages to S3? [y/N] " - read response - response=`echo $response | tr 'A-Z' 'a-z'` -fi - -if [ "x$response" == "xy" -o -n "$NIGHTLY_BUILD" ]; then - echo "Publishing packages to S3." - if [ ! -e "$AWS_FILE" ]; then - echo "$AWS_FILE does not exist -- aborting." - cleanup_exit 1 - fi - - for filepath in `ls *.{deb,rpm,gz}`; do - filename=`basename $filepath` - if [ -n "$NIGHTLY_BUILD" ]; then - filename=`echo $filename | sed s/$VERSION/nightly/` - filename=`echo $filename | sed s/$VERSION_UNDERSCORED/nightly/` - fi - AWS_CONFIG_FILE=$AWS_FILE aws s3 cp $filepath s3://influxdb/$filename --acl public-read --region us-east-1 - if [ $? -ne 0 ]; then - echo "Upload failed -- aborting." - cleanup_exit 1 - fi - done -else - echo "Not publishing packages to S3."
-fi - -########################################################################### -# All done. - -echo -e "\nPackaging process complete." -cleanup_exit 0 diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/influxdb.service b/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/influxdb.service deleted file mode 100644 index c164ed3b2..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/influxdb.service +++ /dev/null @@ -1,16 +0,0 @@ -# If you modify this, please also make sure to edit init.sh - -[Unit] -Description=InfluxDB is an open-source, distributed, time series database -After=network.target - -[Service] -User=influxdb -Group=influxdb -LimitNOFILE=65536 -EnvironmentFile=-/etc/default/influxdb -ExecStart=/opt/influxdb/influxd -config /etc/opt/influxdb/influxdb.conf $INFLUXD_OPTS -Restart=on-failure - -[Install] -WantedBy=multi-user.target diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/init.sh b/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/init.sh deleted file mode 100644 index 7e97b0def..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/scripts/init.sh +++ /dev/null @@ -1,205 +0,0 @@ -#! /usr/bin/env bash - -### BEGIN INIT INFO -# Provides: influxd -# Required-Start: $all -# Required-Stop: $remote_fs $syslog -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Start influxd at boot time -### END INIT INFO - -# If you modify this, please make sure to also edit influxdb.service -# This init script supports three different variations: -# 1. New lsb that define start-stop-daemon -# 2. Old lsb that don't have start-stop-daemon but define log, pidofproc and killproc -# 3. Centos installations without lsb-core installed -# -# In the third case we have to define our own functions which are very dumb -# and expect the args to be positioned correctly. - -# Command-line options that can be set in /etc/default/influxdb. These will override -# any config file values. Example: "-join http://1.2.3.4:8086" -DEFAULT=/etc/default/influxdb - -# Daemon options -INFLUXD_OPTS= - -# Process name (for display) -NAME=influxdb - -# User and group -USER=influxdb -GROUP=influxdb - -# Daemon name, and path to the actual executable. -# If the daemon is not there, then exit. -DAEMON=/opt/influxdb/influxd -[ -x $DAEMON ] || exit 5 - -# Configuration file -CONFIG=/etc/opt/influxdb/influxdb.conf - -# PID file for the daemon -PIDFILE=/var/run/influxdb/influxd.pid -PIDDIR=`dirname $PIDFILE` -if [ ! -d "$PIDDIR" ]; then - mkdir -p $PIDDIR - chown $GROUP:$USER $PIDDIR -fi - -# Max open files -OPEN_FILE_LIMIT=65536 - -if [ -r /lib/lsb/init-functions ]; then - source /lib/lsb/init-functions -fi - -# Logging -if [ -z "$STDOUT" ]; then - STDOUT=/dev/null -fi - -if [ ! -f "$STDOUT" ]; then - mkdir -p $(dirname $STDOUT) -fi - -if [ -z "$STDERR" ]; then - STDERR=/var/log/influxdb/influxd.log -fi - -if [ ! -f "$STDERR" ]; then - mkdir -p $(dirname $STDERR) -fi - -# Overwrite init script variables with /etc/default/influxdb values -if [ -r $DEFAULT ]; then - source $DEFAULT -fi - -function pidofproc() { - if [ $# -ne 3 ]; then - echo "Expected three arguments, e.g. $0 -p pidfile daemon-name" - fi - - PID=`pgrep -f $3` - local PIDFILE=`cat $2` - - if [ "x$PIDFILE" == "x" ]; then - return 1 - fi - - if [ "x$PID" != "x" -a "$PIDFILE" == "$PID" ]; then - return 0 - fi - - return 1 -} - -function killproc() { - if [ $# -ne 3 ]; then - echo "Expected three arguments, e.g.
$0 -p pidfile signal" - fi - - PID=`cat $2` - - kill -s $3 $PID -} - -function log_failure_msg() { - echo "$@" "[ FAILED ]" -} - -function log_success_msg() { - echo "$@" "[ OK ]" -} - -case $1 in - start) - # Check that the config file exists - if [ ! -r $CONFIG ]; then - log_failure_msg "config file doesn't exist" - exit 4 - fi - - # Check that the PID file exists, and check the actual status of the process - if [ -e $PIDFILE ]; then - pidofproc -p $PIDFILE $DAEMON > /dev/null 2>&1 && STATUS="0" || STATUS="$?" - # If the status is SUCCESS then we don't need to start again. - if [ "x$STATUS" = "x0" ]; then - log_failure_msg "$NAME process is running" - exit 0 # Exit - fi - # If the PID file does not exist, check that it is writable - else - su -c "touch $PIDFILE" $USER > /dev/null 2>&1 - if [ $? -ne 0 ]; then - log_failure_msg "$PIDFILE not writable, check permissions" - exit 5 - fi - fi - - # Bump the file limits, before launching the daemon. These will carry over to - # launched processes. - ulimit -n $OPEN_FILE_LIMIT - if [ $? -ne 0 ]; then - log_failure_msg "set open file limit to $OPEN_FILE_LIMIT" - exit 1 - fi - - log_success_msg "Starting the process" "$NAME" - if which start-stop-daemon > /dev/null 2>&1; then - start-stop-daemon --chuid $GROUP:$USER --start --quiet --pidfile $PIDFILE --exec $DAEMON -- -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR & - else - nohup $DAEMON -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR & - fi - log_success_msg "$NAME process was started" - ;; - - stop) - # Stop the daemon. - if [ -e $PIDFILE ]; then - pidofproc -p $PIDFILE $DAEMON > /dev/null 2>&1 && STATUS="0" || STATUS="$?" - if [ "$STATUS" = 0 ]; then - if killproc -p $PIDFILE SIGTERM && /bin/rm -rf $PIDFILE; then - log_success_msg "$NAME process was stopped" - else - log_failure_msg "$NAME failed to stop service" - fi - fi - else - log_failure_msg "$NAME process is not running" - fi - ;; - - restart) - # Restart the daemon. - $0 stop && sleep 2 && $0 start - ;; - - status) - # Check the status of the process. - if [ -e $PIDFILE ]; then - if pidofproc -p $PIDFILE $DAEMON > /dev/null; then - log_success_msg "$NAME process is running" - exit 0 - else - log_failure_msg "$NAME process is not running" - exit 1 - fi - else - log_failure_msg "$NAME process is not running" - exit 3 - fi - ;; - - version) - $DAEMON version - ;; - - *) - # For invalid arguments, print the usage message. - echo "Usage: $0 {start|stop|restart|status|version}" - exit 2 - ;; -esac diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config.go deleted file mode 100644 index 860dff864..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config.go +++ /dev/null @@ -1,21 +0,0 @@ -package admin - -const ( - // DefaultBindAddress is the default bind address for the HTTP server.
- DefaultBindAddress = ":8083" -) - -type Config struct { - Enabled bool `toml:"enabled"` - BindAddress string `toml:"bind-address"` - HttpsEnabled bool `toml:"https-enabled"` - HttpsCertificate string `toml:"https-certificate"` -} - -func NewConfig() Config { - return Config{ - BindAddress: DefaultBindAddress, - HttpsEnabled: false, - HttpsCertificate: "/etc/ssl/influxdb.pem", - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config_test.go deleted file mode 100644 index 1b0422505..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/config_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package admin_test - -import ( - "testing" - - "github.com/BurntSushi/toml" - "github.com/influxdb/influxdb/services/admin" -) - -func TestConfig_Parse(t *testing.T) { - // Parse configuration. - var c admin.Config - if _, err := toml.Decode(` -enabled = true -bind-address = ":8083" -https-enabled = true -https-certificate = "/dev/null" -`, &c); err != nil { - t.Fatal(err) - } - - // Validate configuration. - if c.Enabled != true { - t.Fatalf("unexpected enabled: %v", c.Enabled) - } else if c.BindAddress != ":8083" { - t.Fatalf("unexpected bind address: %s", c.BindAddress) - } else if c.HttpsEnabled != true { - t.Fatalf("unexpected https enabled: %v", c.HttpsEnabled) - } else if c.HttpsCertificate != "/dev/null" { - t.Fatalf("unexpected https certificate: %v", c.HttpsCertificate) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service.go deleted file mode 100644 index 2618bdb6b..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service.go +++ /dev/null @@ -1,111 +0,0 @@ -package admin - -import ( - "crypto/tls" - "fmt" - "log" - "net" - "net/http" - "os" - "strings" - - // Register static assets via statik. - _ "github.com/influxdb/influxdb/statik" - "github.com/rakyll/statik/fs" -) - -// Service manages the listener for an admin endpoint. -type Service struct { - listener net.Listener - addr string - https bool - cert string - err chan error - - logger *log.Logger -} - -// NewService returns a new instance of Service. -func NewService(c Config) *Service { - return &Service{ - addr: c.BindAddress, - https: c.HttpsEnabled, - cert: c.HttpsCertificate, - err: make(chan error), - logger: log.New(os.Stderr, "[admin] ", log.LstdFlags), - } -} - -// Open starts the service -func (s *Service) Open() error { - s.logger.Printf("Starting admin service") - - // Open listener. - if s.https { - cert, err := tls.LoadX509KeyPair(s.cert, s.cert) - if err != nil { - return err - } - - listener, err := tls.Listen("tcp", s.addr, &tls.Config{ - Certificates: []tls.Certificate{cert}, - }) - if err != nil { - return err - } - - s.logger.Println("Listening on HTTPS:", listener.Addr().String()) - s.listener = listener - } else { - listener, err := net.Listen("tcp", s.addr) - if err != nil { - return err - } - - s.logger.Println("Listening on HTTP:", listener.Addr().String()) - s.listener = listener - } - - // Begin listening for requests in a separate goroutine. - go s.serve() - return nil -} - -// Close closes the underlying listener. -func (s *Service) Close() error { - if s.listener != nil { - return s.listener.Close() - } - return nil -} - -// SetLogger sets the internal logger to the logger passed in. 
-func (s *Service) SetLogger(l *log.Logger) { - s.logger = l -} - -// Err returns a channel for fatal errors that occur on the listener. -func (s *Service) Err() <-chan error { return s.err } - -// Addr returns the listener's address. Returns nil if listener is closed. -func (s *Service) Addr() net.Addr { - if s.listener != nil { - return s.listener.Addr() - } - return nil -} - -// serve serves the handler from the listener. -func (s *Service) serve() { - // Instantiate file system from embedded admin. - statikFS, err := fs.New() - if err != nil { - panic(err) - } - - // Run file system handler on listener. - err = http.Serve(s.listener, http.FileServer(statikFS)) - if err != nil && !strings.Contains(err.Error(), "closed") { - s.err <- fmt.Errorf("listener error: addr=%s, err=%s", s.Addr(), err) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service_test.go deleted file mode 100644 index 497b12ea3..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/admin/service_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package admin_test - -import ( - "io/ioutil" - "net/http" - "testing" - - "github.com/influxdb/influxdb/services/admin" -) - -// Ensure service can serve the root index page of the admin. -func TestService_Index(t *testing.T) { - // Start service on random port. - s := admin.NewService(admin.Config{BindAddress: "127.0.0.1:0"}) - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer s.Close() - - // Request root index page. - resp, err := http.Get("http://" + s.Addr().String()) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - // Validate status code and body. - if resp.StatusCode != http.StatusOK { - t.Fatalf("unexpected status: %d", resp.StatusCode) - } else if _, err := ioutil.ReadAll(resp.Body); err != nil { - t.Fatalf("unable to read body: %s", err) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/collectd_test.conf b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/collectd_test.conf deleted file mode 100644 index 97cc4cc08..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/collectd_test.conf +++ /dev/null @@ -1,209 +0,0 @@ -absolute value:ABSOLUTE:0:U -apache_bytes value:DERIVE:0:U -apache_connections value:GAUGE:0:65535 -apache_idle_workers value:GAUGE:0:65535 -apache_requests value:DERIVE:0:U -apache_scoreboard value:GAUGE:0:65535 -ath_nodes value:GAUGE:0:65535 -ath_stat value:DERIVE:0:U -backends value:GAUGE:0:65535 -bitrate value:GAUGE:0:4294967295 -bytes value:GAUGE:0:U -cache_eviction value:DERIVE:0:U -cache_operation value:DERIVE:0:U -cache_ratio value:GAUGE:0:100 -cache_result value:DERIVE:0:U -cache_size value:GAUGE:0:U -charge value:GAUGE:0:U -compression_ratio value:GAUGE:0:2 -compression uncompressed:DERIVE:0:U, compressed:DERIVE:0:U -connections value:DERIVE:0:U -conntrack value:GAUGE:0:4294967295 -contextswitch value:DERIVE:0:U -counter value:COUNTER:U:U -cpufreq value:GAUGE:0:U -cpu value:DERIVE:0:U -current_connections value:GAUGE:0:U -current_sessions value:GAUGE:0:U -current value:GAUGE:U:U -delay value:GAUGE:-1000000:1000000 -derive value:DERIVE:0:U -df_complex value:GAUGE:0:U -df_inodes value:GAUGE:0:U -df used:GAUGE:0:1125899906842623, free:GAUGE:0:1125899906842623 -disk_latency read:GAUGE:0:U, write:GAUGE:0:U -disk_merged read:DERIVE:0:U, write:DERIVE:0:U -disk_octets read:DERIVE:0:U, write:DERIVE:0:U 
-disk_ops_complex value:DERIVE:0:U -disk_ops read:DERIVE:0:U, write:DERIVE:0:U -disk_time read:DERIVE:0:U, write:DERIVE:0:U -dns_answer value:DERIVE:0:U -dns_notify value:DERIVE:0:U -dns_octets queries:DERIVE:0:U, responses:DERIVE:0:U -dns_opcode value:DERIVE:0:U -dns_qtype_cached value:GAUGE:0:4294967295 -dns_qtype value:DERIVE:0:U -dns_query value:DERIVE:0:U -dns_question value:DERIVE:0:U -dns_rcode value:DERIVE:0:U -dns_reject value:DERIVE:0:U -dns_request value:DERIVE:0:U -dns_resolver value:DERIVE:0:U -dns_response value:DERIVE:0:U -dns_transfer value:DERIVE:0:U -dns_update value:DERIVE:0:U -dns_zops value:DERIVE:0:U -duration seconds:GAUGE:0:U -email_check value:GAUGE:0:U -email_count value:GAUGE:0:U -email_size value:GAUGE:0:U -entropy value:GAUGE:0:4294967295 -fanspeed value:GAUGE:0:U -file_size value:GAUGE:0:U -files value:GAUGE:0:U -flow value:GAUGE:0:U -fork_rate value:DERIVE:0:U -frequency_offset value:GAUGE:-1000000:1000000 -frequency value:GAUGE:0:U -fscache_stat value:DERIVE:0:U -gauge value:GAUGE:U:U -hash_collisions value:DERIVE:0:U -http_request_methods value:DERIVE:0:U -http_requests value:DERIVE:0:U -http_response_codes value:DERIVE:0:U -humidity value:GAUGE:0:100 -if_collisions value:DERIVE:0:U -if_dropped rx:DERIVE:0:U, tx:DERIVE:0:U -if_errors rx:DERIVE:0:U, tx:DERIVE:0:U -if_multicast value:DERIVE:0:U -if_octets rx:DERIVE:0:U, tx:DERIVE:0:U -if_packets rx:DERIVE:0:U, tx:DERIVE:0:U -if_rx_errors value:DERIVE:0:U -if_rx_octets value:DERIVE:0:U -if_tx_errors value:DERIVE:0:U -if_tx_octets value:DERIVE:0:U -invocations value:DERIVE:0:U -io_octets rx:DERIVE:0:U, tx:DERIVE:0:U -io_packets rx:DERIVE:0:U, tx:DERIVE:0:U -ipt_bytes value:DERIVE:0:U -ipt_packets value:DERIVE:0:U -irq value:DERIVE:0:U -latency value:GAUGE:0:U -links value:GAUGE:0:U -load shortterm:GAUGE:0:5000, midterm:GAUGE:0:5000, longterm:GAUGE:0:5000 -md_disks value:GAUGE:0:U -memcached_command value:DERIVE:0:U -memcached_connections value:GAUGE:0:U -memcached_items value:GAUGE:0:U -memcached_octets rx:DERIVE:0:U, tx:DERIVE:0:U -memcached_ops value:DERIVE:0:U -memory value:GAUGE:0:281474976710656 -multimeter value:GAUGE:U:U -mutex_operations value:DERIVE:0:U -mysql_commands value:DERIVE:0:U -mysql_handler value:DERIVE:0:U -mysql_locks value:DERIVE:0:U -mysql_log_position value:DERIVE:0:U -mysql_octets rx:DERIVE:0:U, tx:DERIVE:0:U -nfs_procedure value:DERIVE:0:U -nginx_connections value:GAUGE:0:U -nginx_requests value:DERIVE:0:U -node_octets rx:DERIVE:0:U, tx:DERIVE:0:U -node_rssi value:GAUGE:0:255 -node_stat value:DERIVE:0:U -node_tx_rate value:GAUGE:0:127 -objects value:GAUGE:0:U -operations value:DERIVE:0:U -percent value:GAUGE:0:100.1 -percent_bytes value:GAUGE:0:100.1 -percent_inodes value:GAUGE:0:100.1 -pf_counters value:DERIVE:0:U -pf_limits value:DERIVE:0:U -pf_source value:DERIVE:0:U -pf_states value:GAUGE:0:U -pf_state value:DERIVE:0:U -pg_blks value:DERIVE:0:U -pg_db_size value:GAUGE:0:U -pg_n_tup_c value:DERIVE:0:U -pg_n_tup_g value:GAUGE:0:U -pg_numbackends value:GAUGE:0:U -pg_scan value:DERIVE:0:U -pg_xact value:DERIVE:0:U -ping_droprate value:GAUGE:0:100 -ping_stddev value:GAUGE:0:65535 -ping value:GAUGE:0:65535 -players value:GAUGE:0:1000000 -power value:GAUGE:0:U -protocol_counter value:DERIVE:0:U -ps_code value:GAUGE:0:9223372036854775807 -ps_count processes:GAUGE:0:1000000, threads:GAUGE:0:1000000 -ps_cputime user:DERIVE:0:U, syst:DERIVE:0:U -ps_data value:GAUGE:0:9223372036854775807 -ps_disk_octets read:DERIVE:0:U, write:DERIVE:0:U -ps_disk_ops read:DERIVE:0:U, write:DERIVE:0:U 
-ps_pagefaults minflt:DERIVE:0:U, majflt:DERIVE:0:U -ps_rss value:GAUGE:0:9223372036854775807 -ps_stacksize value:GAUGE:0:9223372036854775807 -ps_state value:GAUGE:0:65535 -ps_vm value:GAUGE:0:9223372036854775807 -queue_length value:GAUGE:0:U -records value:GAUGE:0:U -requests value:GAUGE:0:U -response_time value:GAUGE:0:U -response_code value:GAUGE:0:U -route_etx value:GAUGE:0:U -route_metric value:GAUGE:0:U -routes value:GAUGE:0:U -serial_octets rx:DERIVE:0:U, tx:DERIVE:0:U -signal_noise value:GAUGE:U:0 -signal_power value:GAUGE:U:0 -signal_quality value:GAUGE:0:U -snr value:GAUGE:0:U -spam_check value:GAUGE:0:U -spam_score value:GAUGE:U:U -spl value:GAUGE:U:U -swap_io value:DERIVE:0:U -swap value:GAUGE:0:1099511627776 -tcp_connections value:GAUGE:0:4294967295 -temperature value:GAUGE:U:U -threads value:GAUGE:0:U -time_dispersion value:GAUGE:-1000000:1000000 -timeleft value:GAUGE:0:U -time_offset value:GAUGE:-1000000:1000000 -total_bytes value:DERIVE:0:U -total_connections value:DERIVE:0:U -total_objects value:DERIVE:0:U -total_operations value:DERIVE:0:U -total_requests value:DERIVE:0:U -total_sessions value:DERIVE:0:U -total_threads value:DERIVE:0:U -total_time_in_ms value:DERIVE:0:U -total_values value:DERIVE:0:U -uptime value:GAUGE:0:4294967295 -users value:GAUGE:0:65535 -vcl value:GAUGE:0:65535 -vcpu value:GAUGE:0:U -virt_cpu_total value:DERIVE:0:U -virt_vcpu value:DERIVE:0:U -vmpage_action value:DERIVE:0:U -vmpage_faults minflt:DERIVE:0:U, majflt:DERIVE:0:U -vmpage_io in:DERIVE:0:U, out:DERIVE:0:U -vmpage_number value:GAUGE:0:4294967295 -volatile_changes value:GAUGE:0:U -voltage_threshold value:GAUGE:U:U, threshold:GAUGE:U:U -voltage value:GAUGE:U:U -vs_memory value:GAUGE:0:9223372036854775807 -vs_processes value:GAUGE:0:65535 -vs_threads value:GAUGE:0:65535 - -# -# Legacy types -# (required for the v5 upgrade target) -# -arc_counts demand_data:COUNTER:0:U, demand_metadata:COUNTER:0:U, prefetch_data:COUNTER:0:U, prefetch_metadata:COUNTER:0:U -arc_l2_bytes read:COUNTER:0:U, write:COUNTER:0:U -arc_l2_size value:GAUGE:0:U -arc_ratio value:GAUGE:0:U -arc_size current:GAUGE:0:U, target:GAUGE:0:U, minlimit:GAUGE:0:U, maxlimit:GAUGE:0:U -mysql_qcache hits:COUNTER:0:U, inserts:COUNTER:0:U, not_cached:COUNTER:0:U, lowmem_prunes:COUNTER:0:U, queries_in_cache:GAUGE:0:U -mysql_threads running:GAUGE:0:U, connected:GAUGE:0:U, cached:GAUGE:0:U, created:COUNTER:0:U diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config.go deleted file mode 100644 index 3129c3378..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config.go +++ /dev/null @@ -1,44 +0,0 @@ -package collectd - -import ( - "time" - - "github.com/influxdb/influxdb/toml" -) - -const ( - DefaultBindAddress = ":25826" - - DefaultDatabase = "collectd" - - DefaultRetentionPolicy = "" - - DefaultBatchSize = 5000 - - DefaultBatchDuration = toml.Duration(10 * time.Second) - - DefaultTypesDB = "/usr/share/collectd/types.db" -) - -// Config represents a configuration for the collectd service. -type Config struct { - Enabled bool `toml:"enabled"` - BindAddress string `toml:"bind-address"` - Database string `toml:"database"` - RetentionPolicy string `toml:"retention-policy"` - BatchSize int `toml:"batch-size"` - BatchDuration toml.Duration `toml:"batch-timeout"` - TypesDB string `toml:"typesdb"` -} - -// NewConfig returns a new instance of Config with defaults. 
-func NewConfig() Config { - return Config{ - BindAddress: DefaultBindAddress, - Database: DefaultDatabase, - RetentionPolicy: DefaultRetentionPolicy, - BatchSize: DefaultBatchSize, - BatchDuration: DefaultBatchDuration, - TypesDB: DefaultTypesDB, - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config_test.go deleted file mode 100644 index c419dcfa9..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/config_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package collectd_test - -import ( - "testing" - - "github.com/BurntSushi/toml" - "github.com/influxdb/influxdb/services/collectd" -) - -func TestConfig_Parse(t *testing.T) { - // Parse configuration. - var c collectd.Config - if _, err := toml.Decode(` -enabled = true -bind-address = ":9000" -database = "xxx" -typesdb = "yyy" -`, &c); err != nil { - t.Fatal(err) - } - - // Validate configuration. - if c.Enabled != true { - t.Fatalf("unexpected enabled: %v", c.Enabled) - } else if c.BindAddress != ":9000" { - t.Fatalf("unexpected bind address: %s", c.BindAddress) - } else if c.Database != "xxx" { - t.Fatalf("unexpected database: %s", c.Database) - } else if c.TypesDB != "yyy" { - t.Fatalf("unexpected types db: %s", c.TypesDB) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service.go deleted file mode 100644 index b0fee0bc4..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service.go +++ /dev/null @@ -1,278 +0,0 @@ -package collectd - -import ( - "fmt" - "log" - "net" - "os" - "sync" - "time" - - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/tsdb" - "github.com/kimor79/gollectd" -) - -const leaderWaitTimeout = 30 * time.Second - -// pointsWriter is an internal interface to make testing easier. -type pointsWriter interface { - WritePoints(p *cluster.WritePointsRequest) error -} - -// metaStore is an internal interface to make testing easier. -type metaStore interface { - WaitForLeader(d time.Duration) error - CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error) -} - -// Service represents a UDP server which receives metrics in collectd's binary -// protocol and stores them in InfluxDB. -type Service struct { - Config *Config - MetaStore metaStore - PointsWriter pointsWriter - Logger *log.Logger - - wg sync.WaitGroup - err chan error - stop chan struct{} - ln *net.UDPConn - batcher *tsdb.PointBatcher - typesdb gollectd.Types - addr net.Addr -} - -// NewService returns a new instance of the collectd service. -func NewService(c Config) *Service { - s := &Service{ - Config: &c, - Logger: log.New(os.Stderr, "[collectd] ", log.LstdFlags), - err: make(chan error), - } - - return s -} - -// Open starts the service. 
-func (s *Service) Open() error { - s.Logger.Printf("Starting collectd service") - - if s.Config.BindAddress == "" { - return fmt.Errorf("bind address is blank") - } else if s.Config.Database == "" { - return fmt.Errorf("database name is blank") - } else if s.PointsWriter == nil { - return fmt.Errorf("PointsWriter is nil") - } - - if err := s.MetaStore.WaitForLeader(leaderWaitTimeout); err != nil { - s.Logger.Printf("Failed to detect a cluster leader: %s", err.Error()) - return err - } - - if _, err := s.MetaStore.CreateDatabaseIfNotExists(s.Config.Database); err != nil { - s.Logger.Printf("Failed to ensure target database %s exists: %s", s.Config.Database, err.Error()) - return err - } - - if s.typesdb == nil { - // Open collectd types. - typesdb, err := gollectd.TypesDBFile(s.Config.TypesDB) - if err != nil { - return fmt.Errorf("Open(): %s", err) - } - s.typesdb = typesdb - } - - // Resolve our address. - addr, err := net.ResolveUDPAddr("udp", s.Config.BindAddress) - if err != nil { - return fmt.Errorf("unable to resolve UDP address: %s", err) - } - s.addr = addr - - // Start listening - ln, err := net.ListenUDP("udp", addr) - if err != nil { - return fmt.Errorf("unable to listen on UDP: %s", err) - } - s.ln = ln - - s.Logger.Println("Listening on UDP: ", ln.LocalAddr().String()) - - // Start the points batcher. - s.batcher = tsdb.NewPointBatcher(s.Config.BatchSize, time.Duration(s.Config.BatchDuration)) - s.batcher.Start() - - // Create channel and wait group for signalling goroutines to stop. - s.stop = make(chan struct{}) - s.wg.Add(2) - - // Start goroutines that process collectd packets. - go s.serve() - go s.writePoints() - - return nil -} - -// Close stops the service. -func (s *Service) Close() error { - // Close the connection, and wait for the goroutine to exit. - if s.stop != nil { - close(s.stop) - } - if s.ln != nil { - s.ln.Close() - } - if s.batcher != nil { - s.batcher.Stop() - } - s.wg.Wait() - - // Release all remaining resources. - s.stop = nil - s.ln = nil - s.batcher = nil - s.Logger.Println("collectd UDP closed") - return nil -} - -// SetLogger sets the internal logger to the logger passed in. -func (s *Service) SetLogger(l *log.Logger) { - s.Logger = l -} - -// SetTypes sets collectd types db. -func (s *Service) SetTypes(types string) (err error) { - s.typesdb, err = gollectd.TypesDB([]byte(types)) - return -} - -// Err returns a channel for fatal errors that occur on go routines. -func (s *Service) Err() chan error { return s.err } - -// Addr returns the listener's address. Returns nil if listener is closed. -func (s *Service) Addr() net.Addr { - return s.ln.LocalAddr() -} - -func (s *Service) serve() { - defer s.wg.Done() - - // From https://collectd.org/wiki/index.php/Binary_protocol - // 1024 bytes (payload only, not including UDP / IP headers) - // In versions 4.0 through 4.7, the receive buffer has a fixed size - // of 1024 bytes. When longer packets are received, the trailing data - // is simply ignored. Since version 4.8, the buffer size can be - // configured. Version 5.0 will increase the default buffer size to - // 1452 bytes (the maximum payload size when using UDP/IPv6 over - // Ethernet). - buffer := make([]byte, 1452) - - for { - select { - case <-s.stop: - // We closed the connection, time to go. - return - default: - // Keep processing. 
- } - - n, _, err := s.ln.ReadFromUDP(buffer) - if err != nil { - s.Logger.Printf("collectd ReadFromUDP error: %s", err) - continue - } - if n > 0 { - s.handleMessage(buffer[:n]) - } - } -} - -func (s *Service) handleMessage(buffer []byte) { - packets, err := gollectd.Packets(buffer, s.typesdb) - if err != nil { - s.Logger.Printf("Collectd parse error: %s", err) - return - } - for _, packet := range *packets { - points := Unmarshal(&packet) - for _, p := range points { - s.batcher.In() <- p - } - } -} - -func (s *Service) writePoints() { - defer s.wg.Done() - - for { - select { - case <-s.stop: - return - case batch := <-s.batcher.Out(): - req := &cluster.WritePointsRequest{ - Database: s.Config.Database, - RetentionPolicy: s.Config.RetentionPolicy, - ConsistencyLevel: cluster.ConsistencyLevelAny, - Points: batch, - } - if err := s.PointsWriter.WritePoints(req); err != nil { - s.Logger.Printf("failed to write batch: %s", err) - continue - } - } - } -} - -// Unmarshal translates a collectd packet into InfluxDB data points. -func Unmarshal(packet *gollectd.Packet) []tsdb.Point { - // Prefer the high-resolution timestamp. - var timestamp time.Time - if packet.TimeHR > 0 { - // TimeHR is a "near" nanosecond measurement, but not exactly nanosecond time - // Since we store time in microseconds, we round here (mostly so tests will work easier) - sec := packet.TimeHR >> 30 - // Shifting, masking, and dividing by 1 billion to get nanoseconds. - nsec := ((packet.TimeHR & 0x3FFFFFFF) << 30) / 1000 / 1000 / 1000 - timestamp = time.Unix(int64(sec), int64(nsec)).UTC().Round(time.Microsecond) - } else { - // If we don't have a high-resolution time, fall back to basic unix time - timestamp = time.Unix(int64(packet.Time), 0).UTC() - } - - var points []tsdb.Point - for i := range packet.Values { - name := fmt.Sprintf("%s_%s", packet.Plugin, packet.Values[i].Name) - tags := make(map[string]string) - fields := make(map[string]interface{}) - - fields["value"] = packet.Values[i].Value - - if packet.Hostname != "" { - tags["host"] = packet.Hostname - } - if packet.PluginInstance != "" { - tags["instance"] = packet.PluginInstance - } - if packet.Type != "" { - tags["type"] = packet.Type - } - if packet.TypeInstance != "" { - tags["type_instance"] = packet.TypeInstance - } - p := tsdb.NewPoint(name, tags, fields, timestamp) - - points = append(points, p) - } - return points -} - -// assert will panic with a given formatted message if the given condition is false. -func assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assert failed: "+msg, v...)) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service_test.go deleted file mode 100644 index a24c9d29f..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/service_test.go +++ /dev/null @@ -1,501 +0,0 @@ -package collectd - -import ( - "encoding/hex" - "errors" - "io/ioutil" - "log" - "net" - "testing" - "time" - - "github.com/influxdb/influxdb/cluster" - "github.com/influxdb/influxdb/meta" - "github.com/influxdb/influxdb/toml" - "github.com/influxdb/influxdb/tsdb" -) - -// Test that the service checks / creates the target database on startup.
-func TestService_CreatesDatabase(t *testing.T) { - t.Parallel() - - s := newTestService(1, time.Second) - - createDatabaseCalled := false - - ms := &testMetaStore{} - ms.CreateDatabaseIfNotExistsFn = func(name string) (*meta.DatabaseInfo, error) { - if name != s.Config.Database { - t.Errorf("\n\texp = %s\n\tgot = %s\n", s.Config.Database, name) - } - createDatabaseCalled = true - return nil, nil - } - s.Service.MetaStore = ms - - s.Open() - s.Close() - - if !createDatabaseCalled { - t.Errorf("CreateDatabaseIfNotExists should have been called when the service opened.") - } -} - -// Test that the collectd service correctly batches points by BatchSize. -func TestService_BatchSize(t *testing.T) { - t.Parallel() - - totalPoints := len(expPoints) - - // Batch sizes that totalTestPoints divide evenly by. - batchSizes := []int{1, 2, 13} - - for _, batchSize := range batchSizes { - func() { - s := newTestService(batchSize, time.Second) - - pointCh := make(chan tsdb.Point) - s.MetaStore.CreateDatabaseIfNotExistsFn = func(name string) (*meta.DatabaseInfo, error) { return nil, nil } - s.PointsWriter.WritePointsFn = func(req *cluster.WritePointsRequest) error { - if len(req.Points) != batchSize { - t.Errorf("\n\texp = %d\n\tgot = %d\n", batchSize, len(req.Points)) - } - - for _, p := range req.Points { - pointCh <- p - } - return nil - } - - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer func() { t.Log("closing service"); s.Close() }() - - // Get the address & port the service is listening on for collectd data. - addr := s.Addr() - conn, err := net.Dial("udp", addr.String()) - if err != nil { - t.Fatal(err) - } - - // Send the test data to the service. - if n, err := conn.Write(testData); err != nil { - t.Fatal(err) - } else if n != len(testData) { - t.Fatalf("only sent %d of %d bytes", n, len(testData)) - } - - points := []tsdb.Point{} - Loop: - for { - select { - case p := <-pointCh: - points = append(points, p) - if len(points) == totalPoints { - break Loop - } - case <-time.After(time.Second): - t.Logf("exp %d points, got %d", totalPoints, len(points)) - t.Fatal("timed out waiting for points from collectd service") - } - } - - if len(points) != totalPoints { - t.Fatalf("exp %d points, got %d", totalPoints, len(points)) - } - - for i, exp := range expPoints { - got := points[i].String() - if got != exp { - t.Fatalf("\n\texp = %s\n\tgot = %s\n", exp, got) - } - } - }() - } -} - -// Test that the collectd service correctly batches points using BatchDuration. -func TestService_BatchDuration(t *testing.T) { - t.Parallel() - - totalPoints := len(expPoints) - - s := newTestService(5000, 250*time.Millisecond) - - pointCh := make(chan tsdb.Point, 1000) - s.MetaStore.CreateDatabaseIfNotExistsFn = func(name string) (*meta.DatabaseInfo, error) { return nil, nil } - s.PointsWriter.WritePointsFn = func(req *cluster.WritePointsRequest) error { - for _, p := range req.Points { - pointCh <- p - } - return nil - } - - if err := s.Open(); err != nil { - t.Fatal(err) - } - defer func() { t.Log("closing service"); s.Close() }() - - // Get the address & port the service is listening on for collectd data. - addr := s.Addr() - conn, err := net.Dial("udp", addr.String()) - if err != nil { - t.Fatal(err) - } - - // Send the test data to the service. 
- if n, err := conn.Write(testData); err != nil { - t.Fatal(err) - } else if n != len(testData) { - t.Fatalf("only sent %d of %d bytes", n, len(testData)) - } - - points := []tsdb.Point{} -Loop: - for { - select { - case p := <-pointCh: - points = append(points, p) - if len(points) == totalPoints { - break Loop - } - case <-time.After(time.Second): - t.Logf("exp %d points, got %d", totalPoints, len(points)) - t.Fatal("timed out waiting for points from collectd service") - } - } - - if len(points) != totalPoints { - t.Fatalf("exp %d points, got %d", totalPoints, len(points)) - } - - for i, exp := range expPoints { - got := points[i].String() - if got != exp { - t.Fatalf("\n\texp = %s\n\tgot = %s\n", exp, got) - } - } -} - -type testService struct { - *Service - MetaStore testMetaStore - PointsWriter testPointsWriter -} - -func newTestService(batchSize int, batchDuration time.Duration) *testService { - s := &testService{ - Service: NewService(Config{ - BindAddress: "127.0.0.1:0", - Database: "collectd_test", - BatchSize: batchSize, - BatchDuration: toml.Duration(batchDuration), - }), - } - s.Service.PointsWriter = &s.PointsWriter - s.Service.MetaStore = &s.MetaStore - - // Set the collectd types using test string. - if err := s.SetTypes(typesDBText); err != nil { - panic(err) - } - - if !testing.Verbose() { - s.Logger = log.New(ioutil.Discard, "", log.LstdFlags) - } - - return s -} - -type testPointsWriter struct { - WritePointsFn func(*cluster.WritePointsRequest) error -} - -func (w *testPointsWriter) WritePoints(p *cluster.WritePointsRequest) error { - return w.WritePointsFn(p) -} - -type testMetaStore struct { - CreateDatabaseIfNotExistsFn func(name string) (*meta.DatabaseInfo, error) - //DatabaseFn func(name string) (*meta.DatabaseInfo, error) -} - -func (ms *testMetaStore) CreateDatabaseIfNotExists(name string) (*meta.DatabaseInfo, error) { - return ms.CreateDatabaseIfNotExistsFn(name) -} - -func (ms *testMetaStore) WaitForLeader(d time.Duration) error { - return nil -} - -func wait(c chan struct{}, d time.Duration) (err error) { - select { - case <-c: - case <-time.After(d): - err = errors.New("timed out") - } - return -} - -func waitInt(c chan int, d time.Duration) (i int, err error) { - select { - case i = <-c: - case <-time.After(d): - err = errors.New("timed out") - } - return -} - -func check(err error) { - if err != nil { - panic(err) - } -} - -// Raw data sent by collectd, captured using Wireshark. 
-var testData = func() []byte { - b, err := hex.DecodeString("000000167066312d36322d3231302d39342d313733000001000c00000000544928ff0007000c00000000000000050002000c656e74726f7079000004000c656e74726f7079000006000f0001010000000000007240000200086370750000030006310000040008637075000005000969646c65000006000f0001000000000000a674620005000977616974000006000f0001000000000000000000000200076466000003000500000400076466000005000d6c6976652d636f7700000600180002010100000000a090b641000000a0cb6a2742000200086370750000030006310000040008637075000005000e696e74657272757074000006000f00010000000000000000fe0005000c736f6674697271000006000f000100000000000000000000020007646600000300050000040007646600000500096c6976650000060018000201010000000000000000000000e0ec972742000200086370750000030006310000040008637075000005000a737465616c000006000f00010000000000000000000003000632000005000975736572000006000f0001000000000000005f36000500096e696365000006000f0001000000000000000ad80002000e696e746572666163650000030005000004000e69665f6f6374657473000005000b64756d6d79300000060018000200000000000000000000000000000000041a000200076466000004000764660000050008746d70000006001800020101000000000000f240000000a0ea972742000200086370750000030006320000040008637075000005000b73797374656d000006000f00010000000000000045d30002000e696e746572666163650000030005000004000f69665f7061636b657473000005000b64756d6d79300000060018000200000000000000000000000000000000000f000200086370750000030006320000040008637075000005000969646c65000006000f0001000000000000a66480000200076466000003000500000400076466000005000d72756e2d6c6f636b000006001800020101000000000000000000000000000054410002000e696e74657266616365000004000e69665f6572726f7273000005000b64756d6d793000000600180002000000000000000000000000000000000000000200086370750000030006320000040008637075000005000977616974000006000f00010000000000000000000005000e696e74657272757074000006000f0001000000000000000132") - check(err) - return b -}() - -var expPoints = []string{ - "entropy_value,host=pf1-62-210-94-173,type=entropy value=288.0 1414080767000000000", - "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=idle value=10908770.0 1414080767000000000", - "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=wait value=0.0 1414080767000000000", - "df_used,host=pf1-62-210-94-173,type=df,type_instance=live-cow value=378576896.0 1414080767000000000", - "df_free,host=pf1-62-210-94-173,type=df,type_instance=live-cow value=50287988736.0 1414080767000000000", - "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=interrupt value=254.0 1414080767000000000", - "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=softirq value=0.0 1414080767000000000", - "df_used,host=pf1-62-210-94-173,type=df,type_instance=live value=0.0 1414080767000000000", - "df_free,host=pf1-62-210-94-173,type=df,type_instance=live value=50666565632.0 1414080767000000000", - "cpu_value,host=pf1-62-210-94-173,instance=1,type=cpu,type_instance=steal value=0.0 1414080767000000000", - "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=user value=24374.0 1414080767000000000", - "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=nice value=2776.0 1414080767000000000", - "interface_rx,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 value=0.0 1414080767000000000", - "interface_tx,host=pf1-62-210-94-173,type=if_octets,type_instance=dummy0 value=1050.0 1414080767000000000", - "df_used,host=pf1-62-210-94-173,type=df,type_instance=tmp value=73728.0 1414080767000000000", - 
"df_free,host=pf1-62-210-94-173,type=df,type_instance=tmp value=50666491904.0 1414080767000000000", - "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=system value=17875.0 1414080767000000000", - "interface_rx,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 value=0.0 1414080767000000000", - "interface_tx,host=pf1-62-210-94-173,type=if_packets,type_instance=dummy0 value=15.0 1414080767000000000", - "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=idle value=10904704.0 1414080767000000000", - "df_used,host=pf1-62-210-94-173,type=df,type_instance=run-lock value=0.0 1414080767000000000", - "df_free,host=pf1-62-210-94-173,type=df,type_instance=run-lock value=5242880.0 1414080767000000000", - "interface_rx,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 value=0.0 1414080767000000000", - "interface_tx,host=pf1-62-210-94-173,type=if_errors,type_instance=dummy0 value=0.0 1414080767000000000", - "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=wait value=0.0 1414080767000000000", - "cpu_value,host=pf1-62-210-94-173,instance=2,type=cpu,type_instance=interrupt value=306.0 1414080767000000000", -} - -// Taken from /usr/share/collectd/types.db on a Ubuntu system -var typesDBText = ` -absolute value:ABSOLUTE:0:U -apache_bytes value:DERIVE:0:U -apache_connections value:GAUGE:0:65535 -apache_idle_workers value:GAUGE:0:65535 -apache_requests value:DERIVE:0:U -apache_scoreboard value:GAUGE:0:65535 -ath_nodes value:GAUGE:0:65535 -ath_stat value:DERIVE:0:U -backends value:GAUGE:0:65535 -bitrate value:GAUGE:0:4294967295 -bytes value:GAUGE:0:U -cache_eviction value:DERIVE:0:U -cache_operation value:DERIVE:0:U -cache_ratio value:GAUGE:0:100 -cache_result value:DERIVE:0:U -cache_size value:GAUGE:0:4294967295 -charge value:GAUGE:0:U -compression_ratio value:GAUGE:0:2 -compression uncompressed:DERIVE:0:U, compressed:DERIVE:0:U -connections value:DERIVE:0:U -conntrack value:GAUGE:0:4294967295 -contextswitch value:DERIVE:0:U -counter value:COUNTER:U:U -cpufreq value:GAUGE:0:U -cpu value:DERIVE:0:U -current_connections value:GAUGE:0:U -current_sessions value:GAUGE:0:U -current value:GAUGE:U:U -delay value:GAUGE:-1000000:1000000 -derive value:DERIVE:0:U -df_complex value:GAUGE:0:U -df_inodes value:GAUGE:0:U -df used:GAUGE:0:1125899906842623, free:GAUGE:0:1125899906842623 -disk_latency read:GAUGE:0:U, write:GAUGE:0:U -disk_merged read:DERIVE:0:U, write:DERIVE:0:U -disk_octets read:DERIVE:0:U, write:DERIVE:0:U -disk_ops_complex value:DERIVE:0:U -disk_ops read:DERIVE:0:U, write:DERIVE:0:U -disk_time read:DERIVE:0:U, write:DERIVE:0:U -dns_answer value:DERIVE:0:U -dns_notify value:DERIVE:0:U -dns_octets queries:DERIVE:0:U, responses:DERIVE:0:U -dns_opcode value:DERIVE:0:U -dns_qtype_cached value:GAUGE:0:4294967295 -dns_qtype value:DERIVE:0:U -dns_query value:DERIVE:0:U -dns_question value:DERIVE:0:U -dns_rcode value:DERIVE:0:U -dns_reject value:DERIVE:0:U -dns_request value:DERIVE:0:U -dns_resolver value:DERIVE:0:U -dns_response value:DERIVE:0:U -dns_transfer value:DERIVE:0:U -dns_update value:DERIVE:0:U -dns_zops value:DERIVE:0:U -duration seconds:GAUGE:0:U -email_check value:GAUGE:0:U -email_count value:GAUGE:0:U -email_size value:GAUGE:0:U -entropy value:GAUGE:0:4294967295 -fanspeed value:GAUGE:0:U -file_size value:GAUGE:0:U -files value:GAUGE:0:U -fork_rate value:DERIVE:0:U -frequency_offset value:GAUGE:-1000000:1000000 -frequency value:GAUGE:0:U -fscache_stat value:DERIVE:0:U -gauge value:GAUGE:U:U -hash_collisions value:DERIVE:0:U 
-http_request_methods value:DERIVE:0:U -http_requests value:DERIVE:0:U -http_response_codes value:DERIVE:0:U -humidity value:GAUGE:0:100 -if_collisions value:DERIVE:0:U -if_dropped rx:DERIVE:0:U, tx:DERIVE:0:U -if_errors rx:DERIVE:0:U, tx:DERIVE:0:U -if_multicast value:DERIVE:0:U -if_octets rx:DERIVE:0:U, tx:DERIVE:0:U -if_packets rx:DERIVE:0:U, tx:DERIVE:0:U -if_rx_errors value:DERIVE:0:U -if_rx_octets value:DERIVE:0:U -if_tx_errors value:DERIVE:0:U -if_tx_octets value:DERIVE:0:U -invocations value:DERIVE:0:U -io_octets rx:DERIVE:0:U, tx:DERIVE:0:U -io_packets rx:DERIVE:0:U, tx:DERIVE:0:U -ipt_bytes value:DERIVE:0:U -ipt_packets value:DERIVE:0:U -irq value:DERIVE:0:U -latency value:GAUGE:0:U -links value:GAUGE:0:U -load shortterm:GAUGE:0:5000, midterm:GAUGE:0:5000, longterm:GAUGE:0:5000 -md_disks value:GAUGE:0:U -memcached_command value:DERIVE:0:U -memcached_connections value:GAUGE:0:U -memcached_items value:GAUGE:0:U -memcached_octets rx:DERIVE:0:U, tx:DERIVE:0:U -memcached_ops value:DERIVE:0:U -memory value:GAUGE:0:281474976710656 -multimeter value:GAUGE:U:U -mutex_operations value:DERIVE:0:U -mysql_commands value:DERIVE:0:U -mysql_handler value:DERIVE:0:U -mysql_locks value:DERIVE:0:U -mysql_log_position value:DERIVE:0:U -mysql_octets rx:DERIVE:0:U, tx:DERIVE:0:U -nfs_procedure value:DERIVE:0:U -nginx_connections value:GAUGE:0:U -nginx_requests value:DERIVE:0:U -node_octets rx:DERIVE:0:U, tx:DERIVE:0:U -node_rssi value:GAUGE:0:255 -node_stat value:DERIVE:0:U -node_tx_rate value:GAUGE:0:127 -objects value:GAUGE:0:U -operations value:DERIVE:0:U -percent value:GAUGE:0:100.1 -percent_bytes value:GAUGE:0:100.1 -percent_inodes value:GAUGE:0:100.1 -pf_counters value:DERIVE:0:U -pf_limits value:DERIVE:0:U -pf_source value:DERIVE:0:U -pf_states value:GAUGE:0:U -pf_state value:DERIVE:0:U -pg_blks value:DERIVE:0:U -pg_db_size value:GAUGE:0:U -pg_n_tup_c value:DERIVE:0:U -pg_n_tup_g value:GAUGE:0:U -pg_numbackends value:GAUGE:0:U -pg_scan value:DERIVE:0:U -pg_xact value:DERIVE:0:U -ping_droprate value:GAUGE:0:100 -ping_stddev value:GAUGE:0:65535 -ping value:GAUGE:0:65535 -players value:GAUGE:0:1000000 -power value:GAUGE:0:U -protocol_counter value:DERIVE:0:U -ps_code value:GAUGE:0:9223372036854775807 -ps_count processes:GAUGE:0:1000000, threads:GAUGE:0:1000000 -ps_cputime user:DERIVE:0:U, syst:DERIVE:0:U -ps_data value:GAUGE:0:9223372036854775807 -ps_disk_octets read:DERIVE:0:U, write:DERIVE:0:U -ps_disk_ops read:DERIVE:0:U, write:DERIVE:0:U -ps_pagefaults minflt:DERIVE:0:U, majflt:DERIVE:0:U -ps_rss value:GAUGE:0:9223372036854775807 -ps_stacksize value:GAUGE:0:9223372036854775807 -ps_state value:GAUGE:0:65535 -ps_vm value:GAUGE:0:9223372036854775807 -queue_length value:GAUGE:0:U -records value:GAUGE:0:U -requests value:GAUGE:0:U -response_time value:GAUGE:0:U -response_code value:GAUGE:0:U -route_etx value:GAUGE:0:U -route_metric value:GAUGE:0:U -routes value:GAUGE:0:U -serial_octets rx:DERIVE:0:U, tx:DERIVE:0:U -signal_noise value:GAUGE:U:0 -signal_power value:GAUGE:U:0 -signal_quality value:GAUGE:0:U -snr value:GAUGE:0:U -spam_check value:GAUGE:0:U -spam_score value:GAUGE:U:U -spl value:GAUGE:U:U -swap_io value:DERIVE:0:U -swap value:GAUGE:0:1099511627776 -tcp_connections value:GAUGE:0:4294967295 -temperature value:GAUGE:U:U -threads value:GAUGE:0:U -time_dispersion value:GAUGE:-1000000:1000000 -timeleft value:GAUGE:0:U -time_offset value:GAUGE:-1000000:1000000 -total_bytes value:DERIVE:0:U -total_connections value:DERIVE:0:U -total_objects value:DERIVE:0:U -total_operations value:DERIVE:0:U 
-total_requests value:DERIVE:0:U -total_sessions value:DERIVE:0:U -total_threads value:DERIVE:0:U -total_time_in_ms value:DERIVE:0:U -total_values value:DERIVE:0:U -uptime value:GAUGE:0:4294967295 -users value:GAUGE:0:65535 -vcl value:GAUGE:0:65535 -vcpu value:GAUGE:0:U -virt_cpu_total value:DERIVE:0:U -virt_vcpu value:DERIVE:0:U -vmpage_action value:DERIVE:0:U -vmpage_faults minflt:DERIVE:0:U, majflt:DERIVE:0:U -vmpage_io in:DERIVE:0:U, out:DERIVE:0:U -vmpage_number value:GAUGE:0:4294967295 -volatile_changes value:GAUGE:0:U -voltage_threshold value:GAUGE:U:U, threshold:GAUGE:U:U -voltage value:GAUGE:U:U -vs_memory value:GAUGE:0:9223372036854775807 -vs_processes value:GAUGE:0:65535 -vs_threads value:GAUGE:0:65535 -# -# Legacy types -# (required for the v5 upgrade target) -# -arc_counts demand_data:COUNTER:0:U, demand_metadata:COUNTER:0:U, prefetch_data:COUNTER:0:U, prefetch_metadata:COUNTER:0:U -arc_l2_bytes read:COUNTER:0:U, write:COUNTER:0:U -arc_l2_size value:GAUGE:0:U -arc_ratio value:GAUGE:0:U -arc_size current:GAUGE:0:U, target:GAUGE:0:U, minlimit:GAUGE:0:U, maxlimit:GAUGE:0:U -mysql_qcache hits:COUNTER:0:U, inserts:COUNTER:0:U, not_cached:COUNTER:0:U, lowmem_prunes:COUNTER:0:U, queries_in_cache:GAUGE:0:U -mysql_threads running:GAUGE:0:U, connected:GAUGE:0:U, cached:GAUGE:0:U, created:COUNTER:0:U -` diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/README.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/README.md deleted file mode 100644 index 90de2b2b6..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/README.md +++ /dev/null @@ -1,3 +0,0 @@ -collectD Client -============ -This directory contains code for generating collectd load. diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/client.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/client.go deleted file mode 100644 index 790f5e871..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/collectd/test_client/client.go +++ /dev/null @@ -1,71 +0,0 @@ -package main - -import ( - "collectd.org/api" - "collectd.org/network" - - "flag" - "fmt" - "math/rand" - "os" - "strconv" - "time" -) - -var nMeasurments = flag.Int("m", 1, "Number of measurements") -var tagVariance = flag.Int("v", 1, "Number of values per tag. 
Client is fixed at one tag") -var rate = flag.Int("r", 1, "Number of points per second") -var total = flag.Int("t", -1, "Total number of points to send (default is no limit)") -var host = flag.String("u", "127.0.0.1:25826", "Destination host in the form host:port") - -func main() { - flag.Parse() - - conn, err := network.Dial(*host, network.ClientOptions{}) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - defer conn.Close() - - rateLimiter := make(chan int, *rate) - - go func() { - ticker := time.NewTicker(time.Second) - for { - select { - case <-ticker.C: - for i := 0; i < *rate; i++ { - rateLimiter <- i - } - } - } - }() - - nSent := 0 - for { - if nSent >= *total && *total > 0 { - break - } - <-rateLimiter - - vl := api.ValueList{ - Identifier: api.Identifier{ - Host: "tagvalue" + strconv.Itoa(int(rand.Int31n(int32(*tagVariance)))), - Plugin: "golang" + strconv.Itoa(int(rand.Int31n(int32(*nMeasurments)))), - Type: "gauge", - }, - Time: time.Now(), - Interval: 10 * time.Second, - Values: []api.Value{api.Gauge(42.0)}, - } - if err := conn.Write(vl); err != nil { - fmt.Println(err) - os.Exit(1) - } - conn.Flush() - nSent = nSent + 1 - } - - fmt.Println("Number of points sent:", nSent) -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config.go deleted file mode 100644 index 1549a816b..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config.go +++ /dev/null @@ -1,65 +0,0 @@ -package continuous_querier - -import ( - "time" - - "github.com/influxdb/influxdb/toml" -) - -const ( - DefaultRecomputePreviousN = 2 - - DefaultRecomputeNoOlderThan = 10 * time.Minute - - DefaultComputeRunsPerInterval = 10 - - DefaultComputeNoMoreThan = 2 * time.Minute -) - -// Config represents a configuration for the continuous query service. -type Config struct { - // Enables logging in the CQ service to display when CQs are processed and how many points are written. - LogEnabled bool `toml:"log-enabled"` - - // If this flag is set to false, both the brokers and data nodes should ignore any CQ processing. - Enabled bool `toml:"enabled"` - - // When continuous queries are run we'll automatically recompute previous intervals - // in case lagged data came in. Set to zero if you never have lagged data. We do - // it this way because invalidating previously computed intervals would be insanely hard - // and expensive. - RecomputePreviousN int `toml:"recompute-previous-n"` - - // The RecomputePreviousN setting provides guidance for how far back to recompute; the RecomputeNoOlderThan - // setting sets a ceiling on how far back in time it will go. For example, if you have 2 PreviousN - // and have this set to 10m, then we'd only compute the previous two intervals for any - // CQs that have a group by time <= 5m. For all others, we'd only recompute the previous window - RecomputeNoOlderThan toml.Duration `toml:"recompute-no-older-than"` - - // ComputeRunsPerInterval will determine how many times the current and previous N intervals - // will be computed. The group by time will be divided by this and it will get computed this many times: - // group by time seconds / runs per interval - // This will give partial results for current group by intervals and will determine how long it will - // be until lagged data is recomputed.
For example, if this number is 10 and the group by time is 10m, it - // will be a minute past the previous 10m bucket of time before lagged data is picked up - ComputeRunsPerInterval int `toml:"compute-runs-per-interval"` - - // ComputeNoMoreThan paired with the RunsPerInterval will determine the ceiling of how many times smaller - // group by times will be computed. For example, if you have RunsPerInterval set to 10 and this setting - // set to 1m, then a group by time(1m) will actually only get computed once per interval (and once per PreviousN). - // If you have a group by time(5m) then you'll get five computes per interval. Any group by time window larger - // than 10m will get computed 10 times for each interval. - ComputeNoMoreThan toml.Duration `toml:"compute-no-more-than"` -} - -// NewConfig returns a new instance of Config with defaults. -func NewConfig() Config { - return Config{ - LogEnabled: true, - Enabled: true, - RecomputePreviousN: DefaultRecomputePreviousN, - RecomputeNoOlderThan: toml.Duration(DefaultRecomputeNoOlderThan), - ComputeRunsPerInterval: DefaultComputeRunsPerInterval, - ComputeNoMoreThan: toml.Duration(DefaultComputeNoMoreThan), - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config_test.go b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config_test.go deleted file mode 100644 index 2a0edc4f2..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/config_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package continuous_querier_test - -import ( - "testing" - "time" - - "github.com/BurntSushi/toml" - "github.com/influxdb/influxdb/services/continuous_querier" -) - -func TestConfig_Parse(t *testing.T) { - // Parse configuration. - var c continuous_querier.Config - if _, err := toml.Decode(` -recompute-previous-n = 1 -recompute-no-older-than = "10s" -compute-runs-per-interval = 2 -compute-no-more-than = "20s" -enabled = true -`, &c); err != nil { - t.Fatal(err) - } - - // Validate configuration. - if c.RecomputePreviousN != 1 { - t.Fatalf("unexpected recompute previous n: %d", c.RecomputePreviousN) - } else if time.Duration(c.RecomputeNoOlderThan) != 10*time.Second { - t.Fatalf("unexpected recompute no older than: %v", c.RecomputeNoOlderThan) - } else if c.ComputeRunsPerInterval != 2 { - t.Fatalf("unexpected compute runs per interval: %d", c.ComputeRunsPerInterval) - } else if time.Duration(c.ComputeNoMoreThan) != 20*time.Second { - t.Fatalf("unexpected compute no more than: %v", c.ComputeNoMoreThan) - } else if c.Enabled != true { - t.Fatalf("unexpected enabled: %v", c.Enabled) - } -} diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/continuous_queries.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/continuous_queries.md deleted file mode 100644 index bd14b5161..000000000 --- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/continuous_queries.md +++ /dev/null @@ -1,236 +0,0 @@ -# Continuous Queries - -This document lays out continuous queries and a proposed architecture for how they'll work within an InfluxDB cluster. - -## Definition of Continuous Queries - -Continuous queries serve two purposes in InfluxDB: - -1. Combining many series into a single series (i.e. removing 1 or more tag dimensions to make queries more efficient) -2.
diff --git a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/continuous_queries.md b/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/continuous_queries.md
deleted file mode 100644
index bd14b5161..000000000
--- a/Godeps/_workspace/src/github.com/influxdb/influxdb/services/continuous_querier/continuous_queries.md
+++ /dev/null
@@ -1,236 +0,0 @@
-# Continuous Queries
-
-This document lays out continuous queries and a proposed architecture for how they'll work within an InfluxDB cluster.
-
-## Definition of Continuous Queries
-
-Continuous queries serve two purposes in InfluxDB:
-
-1. Combining many series into a single series (i.e. removing 1 or more tag dimensions to make queries more efficient)
-2. Aggregating and downsampling series
-
-The purpose of both types of continuous queries is to duplicate or downsample data automatically in the background, to make querying their results fast and efficient. Think of them as another way to create indexes on data.
-
-Generally, there are continuous queries that create copies of data into another measurement or tagset, and queries that downsample and aggregate data. The only difference between the two types is whether the query has a `GROUP BY time` clause.
-
-Before we get to the continuous query examples, we need to define the `INTO` syntax of queries.
-
-### INTO
-
-`INTO` is a method for running a query and having it output into either another measurement name, retention policy, or database. The syntax looks like this:
-
-```sql
-SELECT *
-INTO [<retention policy>.]<measurement> [ON <database>]
-FROM <measurement>
-[WHERE ...]
-[GROUP BY ...]
-```
-
-The syntax states that the retention policy, database, where clause, and group by clause are all optional. If a retention policy isn't specified, the database's default retention policy will be written into. If the database isn't specified, the database the query is running from will be written into.
-
-By selecting specific fields, `INTO` can merge many series into one that will go into either a new measurement, retention policy, or database. For example:
-
-```sql
-SELECT mean(value) as value, region
-INTO "1h.cpu_load"
-FROM cpu_load
-GROUP BY time(1h), region
-```
-
-That will give 1h summaries of the mean value of the `cpu_load` for each `region`. Specifying `region` in the `GROUP BY` clause is unnecessary since having it in the `SELECT` clause forces it to be grouped by that tag; we've just included it in the example for clarity.
-
-With `SELECT ... INTO`, fields will be written as fields and tags will be written as tags.
-
-### Continuous Query Syntax
-
-`INTO` queries run once. Continuous queries turn an `INTO` query into something that runs in the background in the cluster. They're kind of like triggers in SQL.
-
-```sql
-CREATE CONTINUOUS QUERY "1h_cpu_load"
-ON database_name
-BEGIN
-  SELECT mean(value) as value, region
-  INTO "1h.cpu_load"
-  FROM cpu_load
-  GROUP BY time(1h), region
-END
-```
-
-Or chain them together:
-
-```sql
-CREATE CONTINUOUS QUERY "10m_event_count"
-ON database_name
-BEGIN
-  SELECT count(value)
-  INTO "10m.events"
-  FROM events
-  GROUP BY time(10m)
-END
-
--- this selects from the output of one continuous query and outputs to another series
-CREATE CONTINUOUS QUERY "1h_event_count"
-ON database_name
-BEGIN
-  SELECT sum(count) as count
-  INTO "1h.events"
-  FROM events
-  GROUP BY time(1h)
-END
-```
-
-Or multiple aggregations from all series in a measurement. This example assumes you have a retention policy named `1h`.
-
-```sql
-CREATE CONTINUOUS QUERY "1h_cpu_load"
-ON database_name
-BEGIN
-  SELECT mean(value), percentile(80, value) as percentile_80, percentile(95, value) as percentile_95
-  INTO "1h.cpu_load"
-  FROM cpu_load
-  GROUP BY time(1h), *
-END
-```
-
-The `GROUP BY *` indicates that we want to group by the tagset of the points written in. The same tags will be written to the output series. The multiple aggregates in the `SELECT` clause (percentile, mean) will be written in as fields to the resulting series.
-
-Showing what continuous queries we have:
-
-```sql
-LIST CONTINUOUS QUERIES
-```
-
-Dropping continuous queries:
-
-```sql
-DROP CONTINUOUS QUERY <name>
-ON <database>
-```
-
-### Security
-
-To create or drop a continuous query, the user must be an admin.
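Since being an admin is the only requirement, creating one from code is a single query round-trip. A sketch of what that might look like with the Go client of this era (the `github.com/influxdb/influxdb/client` package path and its field names are assumptions about the 0.9 client API, not something this document defines):

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/influxdb/influxdb/client"
)

func main() {
	u, err := url.Parse("http://localhost:8086")
	if err != nil {
		fmt.Println(err)
		return
	}

	// CREATE/DROP CONTINUOUS QUERY require admin credentials.
	c, err := client.NewClient(client.Config{URL: *u, Username: "admin", Password: "secret"})
	if err != nil {
		fmt.Println(err)
		return
	}

	q := client.Query{
		Command: `CREATE CONTINUOUS QUERY "1h_cpu_load" ON database_name BEGIN ` +
			`SELECT mean(value) as value INTO "1h.cpu_load" FROM cpu_load GROUP BY time(1h) END`,
		Database: "database_name",
	}
	resp, err := c.Query(q)
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	if resp.Error() != nil {
		fmt.Println("create failed:", resp.Error())
	}
}
```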
-
-### Limitations
-
-In order to prevent cycles and endless copying of data, the following limitation is enforced on continuous queries at create time:
-
-*The output of a continuous query must go to either a different measurement or to a different retention policy.*
-
-In theory a user could still create a cycle with multiple continuous queries. We should check for these and disallow them.
-
-## Proposed Architecture
-
-Continuous queries should be stored in the metastore cluster-wide. That is, they are part of the database schema and should be stored on every server in a cluster.
-
-Continuous queries will have to be handled differently for two use cases: those that simply copy data (CQs without a group by time) and those that aggregate and downsample data (those with a group by time).
-
-### No group by time
-
-CQs that have no `GROUP BY time` clause should be evaluated at the data node as part of the write. The single write should create any other writes for the CQ and submit those in the same request to the brokers, to ensure that all writes succeed (both the original and the new CQ writes) or none do.
-
-I imagine the process going something like this:
-
-1. Convert the data point into its compact form `