Remove Godeps/ directory

parent e02973b6f4
commit 811a54af6c
@@ -1,277 +0,0 @@
{
	"ImportPath": "github.com/influxdb/telegraf",
	"GoVersion": "go1.5.1",
	"Deps": [
		{
			"ImportPath": "bitbucket.org/ww/goautoneg",
			"Comment": "null-5",
			"Rev": "75cd24fc2f2c2a2088577d12123ddee5f54e0675"
		},
		{
			"ImportPath": "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git",
			"Comment": "v0.9.1-14-g546c47a",
			"Rev": "546c47a6d0e9492e77f6f37473d59c36a708e08b"
		},
		{
			"ImportPath": "github.com/Shopify/sarama",
			"Comment": "v1.4.3-45-g5b18996",
			"Rev": "5b18996ef1cd555a60562ae4c5d7843ae137e12d"
		},
		{
			"ImportPath": "github.com/Sirupsen/logrus",
			"Comment": "v0.8.6-7-g9c060de",
			"Rev": "9c060de643590dae45da9d7c26276463bfc46fa0"
		},
		{
			"ImportPath": "github.com/amir/raidman",
			"Rev": "6a8e089bbe32e6b907feae5ba688841974b3c339"
		},
		{
			"ImportPath": "github.com/armon/go-metrics",
			"Rev": "b2d95e5291cdbc26997d1301a5e467ecbb240e25"
		},
		{
			"ImportPath": "github.com/beorn7/perks/quantile",
			"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
		},
		{
			"ImportPath": "github.com/boltdb/bolt",
			"Comment": "v1.0-117-g0f053fa",
			"Rev": "0f053fabc06119583d61937a0a06ef0ba0f1b301"
		},
		{
			"ImportPath": "github.com/cenkalti/backoff",
			"Rev": "4dc77674aceaabba2c7e3da25d4c823edfb73f99"
		},
		{
			"ImportPath": "github.com/dancannon/gorethink/encoding",
			"Comment": "v1.x.x-1-g786f12a",
			"Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f"
		},
		{
			"ImportPath": "github.com/dancannon/gorethink/ql2",
			"Comment": "v1.x.x-1-g786f12a",
			"Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f"
		},
		{
			"ImportPath": "github.com/dancannon/gorethink/types",
			"Comment": "v1.x.x-1-g786f12a",
			"Rev": "786f12ae730ea93485c4eb2c44b3ede6e1e8745f"
		},
		{
			"ImportPath": "github.com/eapache/go-resiliency/breaker",
			"Comment": "v1.0.0-1-ged0319b",
			"Rev": "ed0319b32e66e3295db52695ba3ee493e823fbfe"
		},
		{
			"ImportPath": "github.com/eapache/queue",
			"Comment": "v1.0.2",
			"Rev": "ded5959c0d4e360646dc9e9908cff48666781367"
		},
		{
			"ImportPath": "github.com/fsouza/go-dockerclient",
			"Rev": "ef410296f87750305e1e1acf9ad2ba3833dcb004"
		},
		{
			"ImportPath": "github.com/go-sql-driver/mysql",
			"Comment": "v1.2-118-g3dd7008",
			"Rev": "3dd7008ac1529aca1bcd8a9db75228a71ba23cac"
		},
		{
			"ImportPath": "github.com/gogo/protobuf/proto",
			"Rev": "cabd153b69f71bab8b89fd667a2d9bb28c92ceb4"
		},
		{
			"ImportPath": "github.com/golang/protobuf/proto",
			"Rev": "73aaaa9eb61d74fbf7e256ca586a3a565b308eea"
		},
		{
			"ImportPath": "github.com/golang/snappy",
			"Rev": "723cc1e459b8eea2dea4583200fd60757d40097a"
		},
		{
			"ImportPath": "github.com/gonuts/go-shellquote",
			"Rev": "e842a11b24c6abfb3dd27af69a17f482e4b483c2"
		},
		{
			"ImportPath": "github.com/hashicorp/go-msgpack/codec",
			"Rev": "fa3f63826f7c23912c15263591e65d54d080b458"
		},
		{
			"ImportPath": "github.com/hashicorp/raft",
			"Rev": "9b586e29edf1ed085b11da7772479ee45c433996"
		},
		{
			"ImportPath": "github.com/hashicorp/raft-boltdb",
			"Rev": "d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee"
		},
		{
			"ImportPath": "github.com/influxdb/influxdb",
			"Comment": "v0.9.4-rc1-922-gb0e9f7e",
			"Rev": "b0e9f7e844225b05abf9f4455229490f99348ac4"
		},
		{
			"ImportPath": "github.com/lib/pq",
			"Comment": "go1.0-cutoff-59-gb269bd0",
			"Rev": "b269bd035a727d6c1081f76e7a239a1b00674c40"
		},
		{
			"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
			"Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a"
		},
		{
			"ImportPath": "github.com/mreiferson/go-snappystream",
			"Comment": "v0.2.3",
			"Rev": "028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504"
		},
		{
			"ImportPath": "github.com/naoina/go-stringutil",
			"Rev": "360db0db4b01d34e12a2ec042c09e7d37fece761"
		},
		{
			"ImportPath": "github.com/naoina/toml",
			"Rev": "5811abcabb29d6af0fdf060f96d328962bd3cd5e"
		},
		{
			"ImportPath": "github.com/nsqio/go-nsq",
			"Comment": "v1.0.5-6-g2118015",
			"Rev": "2118015c120962edc5d03325c680daf3163a8b5f"
		},
		{
			"ImportPath": "github.com/pborman/uuid",
			"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
		},
		{
			"ImportPath": "github.com/prometheus/client_golang/prometheus",
			"Comment": "0.7.0-52-ge51041b",
			"Rev": "e51041b3fa41cece0dca035740ba6411905be473"
		},
		{
			"ImportPath": "github.com/prometheus/client_model/go",
			"Comment": "model-0.0.2-12-gfa8ad6f",
			"Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
		},
		{
			"ImportPath": "github.com/prometheus/common/expfmt",
			"Rev": "369ec0491ce7be15431bd4f23b7fa17308f94190"
		},
		{
			"ImportPath": "github.com/prometheus/common/model",
			"Rev": "369ec0491ce7be15431bd4f23b7fa17308f94190"
		},
		{
			"ImportPath": "github.com/prometheus/procfs",
			"Rev": "454a56f35412459b5e684fd5ec0f9211b94f002a"
		},
		{
			"ImportPath": "github.com/samuel/go-zookeeper/zk",
			"Rev": "5bb5cfc093ad18a28148c578f8632cfdb4d802e4"
		},
		{
			"ImportPath": "github.com/shirou/gopsutil/cpu",
			"Comment": "1.0.0-208-g759e96e",
			"Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323"
		},
		{
			"ImportPath": "github.com/shirou/gopsutil/disk",
			"Comment": "1.0.0-208-g759e96e",
			"Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323"
		},
		{
			"ImportPath": "github.com/shirou/gopsutil/docker",
			"Comment": "1.0.0-208-g759e96e",
			"Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323"
		},
		{
			"ImportPath": "github.com/shirou/gopsutil/host",
			"Comment": "1.0.0-208-g759e96e",
			"Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323"
		},
		{
			"ImportPath": "github.com/shirou/gopsutil/load",
			"Comment": "1.0.0-208-g759e96e",
			"Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323"
		},
		{
			"ImportPath": "github.com/shirou/gopsutil/mem",
			"Comment": "1.0.0-208-g759e96e",
			"Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323"
		},
		{
			"ImportPath": "github.com/shirou/gopsutil/net",
			"Comment": "1.0.0-208-g759e96e",
			"Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323"
		},
		{
			"ImportPath": "github.com/shirou/gopsutil/process",
			"Comment": "1.0.0-208-g759e96e",
			"Rev": "759e96ebaffb01c3cba0e8b129ef29f56507b323"
		},
		{
			"ImportPath": "github.com/streadway/amqp",
			"Rev": "f4879ba28fffbb576743b03622a9ff20461826b2"
		},
		{
			"ImportPath": "github.com/stretchr/objx",
			"Rev": "cbeaeb16a013161a98496fad62933b1d21786672"
		},
		{
			"ImportPath": "github.com/stretchr/testify/assert",
			"Comment": "v1.0-21-gf552045",
			"Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1"
		},
		{
			"ImportPath": "github.com/stretchr/testify/mock",
			"Comment": "v1.0-21-gf552045",
			"Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1"
		},
		{
			"ImportPath": "github.com/stretchr/testify/require",
			"Comment": "v1.0-21-gf552045",
			"Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1"
		},
		{
			"ImportPath": "github.com/stretchr/testify/suite",
			"Comment": "v1.0-21-gf552045",
			"Rev": "f5520455607c0233cb6d7b056f71b22c1d265ef1"
		},
		{
			"ImportPath": "github.com/wvanbergen/kafka/consumergroup",
			"Rev": "b0e5c20a0d7c3ccfd37a5965ae30a3a0fd15945d"
		},
		{
			"ImportPath": "github.com/wvanbergen/kazoo-go",
			"Rev": "02a3868e9b87153285439cd27a39c0a2984a13af"
		},
		{
			"ImportPath": "golang.org/x/crypto/bcrypt",
			"Rev": "173ce04bfaf66c7bb0fa9d5c0bfd93e773909dbd"
		},
		{
			"ImportPath": "golang.org/x/crypto/blowfish",
			"Rev": "173ce04bfaf66c7bb0fa9d5c0bfd93e773909dbd"
		},
		{
			"ImportPath": "golang.org/x/net/websocket",
			"Rev": "db8e4de5b2d6653f66aea53094624468caad15d2"
		},
		{
			"ImportPath": "gopkg.in/dancannon/gorethink.v1",
			"Comment": "v1.x.x",
			"Rev": "8aca6ba2cc6e873299617d730fac0d7f6593113a"
		},
		{
			"ImportPath": "gopkg.in/fatih/pool.v2",
			"Rev": "cba550ebf9bce999a02e963296d4bc7a486cb715"
		},
		{
			"ImportPath": "gopkg.in/mgo.v2",
			"Comment": "r2015.06.03-3-g3569c88",
			"Rev": "3569c88678d88179dcbd68d02ab081cbca3cd4d0"
		},
		{
			"ImportPath": "gopkg.in/yaml.v2",
			"Rev": "7ad95dd0798a40da1ccdff6dff35fd177b5edf40"
		}
	]
}
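Aside (not part of the removed file): the manifest above is plain JSON, so its schema can be modelled directly in Go. The sketch below is illustrative only; the `Godeps` and `Dependency` type names are hypothetical, and only the fields that actually appear above (ImportPath, GoVersion, Deps, Comment, Rev) are used.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Dependency mirrors one entry of the "Deps" array in the manifest above.
type Dependency struct {
	ImportPath string
	Comment    string
	Rev        string
}

// Godeps mirrors the top-level object of Godeps/Godeps.json.
type Godeps struct {
	ImportPath string
	GoVersion  string
	Deps       []Dependency
}

func main() {
	raw, err := os.ReadFile("Godeps/Godeps.json")
	if err != nil {
		panic(err)
	}
	var g Godeps
	if err := json.Unmarshal(raw, &g); err != nil {
		panic(err)
	}
	// Print each pinned dependency and the revision it was vendored at.
	for _, d := range g.Deps {
		fmt.Printf("%-60s %s\n", d.ImportPath, d.Rev)
	}
}
```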
@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
@@ -1,2 +0,0 @@
/pkg
/bin
@@ -1,13 +0,0 @@
include $(GOROOT)/src/Make.inc

TARG=bitbucket.org/ww/goautoneg
GOFILES=autoneg.go

include $(GOROOT)/src/Make.pkg

format:
	gofmt -w *.go

docs:
	gomake clean
	godoc ${TARG} > README.txt
@@ -1,67 +0,0 @@
PACKAGE

package goautoneg
import "bitbucket.org/ww/goautoneg"

HTTP Content-Type Autonegotiation.

The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html

Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    Neither the name of the Open Knowledge Foundation Ltd. nor the
    names of its contributors may be used to endorse or promote
    products derived from this software without specific prior written
    permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


FUNCTIONS

func Negotiate(header string, alternatives []string) (content_type string)
    Negotiate the most appropriate content_type given the accept header
    and a list of alternatives.

func ParseAccept(header string) (accept []Accept)
    Parse an Accept Header string returning a sorted list
    of clauses


TYPES

type Accept struct {
    Type, SubType string
    Q             float32
    Params        map[string]string
}
    Structure to represent a clause in an HTTP Accept Header


SUBDIRECTORIES

	.hg
@@ -1,162 +0,0 @@
/*
HTTP Content-Type Autonegotiation.

The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html

Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    Neither the name of the Open Knowledge Foundation Ltd. nor the
    names of its contributors may be used to endorse or promote
    products derived from this software without specific prior written
    permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package goautoneg

import (
	"sort"
	"strconv"
	"strings"
)

// Structure to represent a clause in an HTTP Accept Header
type Accept struct {
	Type, SubType string
	Q             float64
	Params        map[string]string
}

// For internal use, so that we can use the sort interface
type accept_slice []Accept

func (accept accept_slice) Len() int {
	slice := []Accept(accept)
	return len(slice)
}

func (accept accept_slice) Less(i, j int) bool {
	slice := []Accept(accept)
	ai, aj := slice[i], slice[j]
	if ai.Q > aj.Q {
		return true
	}
	if ai.Type != "*" && aj.Type == "*" {
		return true
	}
	if ai.SubType != "*" && aj.SubType == "*" {
		return true
	}
	return false
}

func (accept accept_slice) Swap(i, j int) {
	slice := []Accept(accept)
	slice[i], slice[j] = slice[j], slice[i]
}

// Parse an Accept Header string returning a sorted list
// of clauses
func ParseAccept(header string) (accept []Accept) {
	parts := strings.Split(header, ",")
	accept = make([]Accept, 0, len(parts))
	for _, part := range parts {
		part := strings.Trim(part, " ")

		a := Accept{}
		a.Params = make(map[string]string)
		a.Q = 1.0

		mrp := strings.Split(part, ";")

		media_range := mrp[0]
		sp := strings.Split(media_range, "/")
		a.Type = strings.Trim(sp[0], " ")

		switch {
		case len(sp) == 1 && a.Type == "*":
			a.SubType = "*"
		case len(sp) == 2:
			a.SubType = strings.Trim(sp[1], " ")
		default:
			continue
		}

		if len(mrp) == 1 {
			accept = append(accept, a)
			continue
		}

		for _, param := range mrp[1:] {
			sp := strings.SplitN(param, "=", 2)
			if len(sp) != 2 {
				continue
			}
			token := strings.Trim(sp[0], " ")
			if token == "q" {
				a.Q, _ = strconv.ParseFloat(sp[1], 32)
			} else {
				a.Params[token] = strings.Trim(sp[1], " ")
			}
		}

		accept = append(accept, a)
	}

	slice := accept_slice(accept)
	sort.Sort(slice)

	return
}

// Negotiate the most appropriate content_type given the accept header
// and a list of alternatives.
func Negotiate(header string, alternatives []string) (content_type string) {
	asp := make([][]string, 0, len(alternatives))
	for _, ctype := range alternatives {
		asp = append(asp, strings.SplitN(ctype, "/", 2))
	}
	for _, clause := range ParseAccept(header) {
		for i, ctsp := range asp {
			if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
				content_type = alternatives[i]
				return
			}
			if clause.Type == ctsp[0] && clause.SubType == "*" {
				content_type = alternatives[i]
				return
			}
			if clause.Type == "*" && clause.SubType == "*" {
				content_type = alternatives[i]
				return
			}
		}
	}
	return
}
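Aside (not part of the removed file): a minimal sketch of how the goautoneg package shown above is typically used, with a made-up Accept header and alternatives list for illustration.

```go
package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg"
)

func main() {
	// An Accept header as a browser might send it.
	header := "text/html;q=0.9, application/json, */*;q=0.1"

	// Negotiate picks the best match from the types the server can produce,
	// using the ParseAccept/Negotiate functions defined in autoneg.go above.
	alternatives := []string{"application/json", "text/plain"}
	fmt.Println(goautoneg.Negotiate(header, alternatives)) // application/json

	// ParseAccept returns the clauses sorted by preference.
	for _, clause := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s q=%g\n", clause.Type, clause.SubType, clause.Q)
	}
}
```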
@@ -1,36 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

*.msg
*.lok

samples/trivial
samples/trivial2
samples/sample
samples/reconnect
samples/ssl
samples/custom_store
samples/simple
samples/stdinpub
samples/stdoutsub
samples/routing
@@ -1,69 +0,0 @@
Contributing to Paho
====================

Thanks for your interest in this project.

Project description:
--------------------

The Paho project has been created to provide scalable open-source implementations of open and standard messaging protocols aimed at new, existing, and emerging applications for Machine-to-Machine (M2M) and Internet of Things (IoT).
Paho reflects the inherent physical and cost constraints of device connectivity. Its objectives include effective levels of decoupling between devices and applications, designed to keep markets open and encourage the rapid growth of scalable Web and Enterprise middleware and applications. Paho is being kicked off with MQTT publish/subscribe client implementations for use on embedded platforms, along with corresponding server support as determined by the community.

- https://projects.eclipse.org/projects/technology.paho

Developer resources:
--------------------

Information regarding source code management, builds, coding standards, and more.

- https://projects.eclipse.org/projects/technology.paho/developer

Contributor License Agreement:
------------------------------

Before your contribution can be accepted by the project, you need to create and electronically sign the Eclipse Foundation Contributor License Agreement (CLA).

- http://www.eclipse.org/legal/CLA.php

Contributing Code:
------------------

The Go client uses git with Gerrit for code review, use the following URLs for Gerrit access;

ssh://<username>@git.eclipse.org:29418/paho/org.eclipse.paho.mqtt.golang

Configure a remote called review to push your changes to;

git config remote.review.url ssh://<username>@git.eclipse.org:29418/paho/org.eclipse.paho.mqtt.golang
git config remote.review.push HEAD:refs/for/<branch>

When you have made and committed a change you can push it to Gerrit for review with;

git push review

See https://wiki.eclipse.org/Gerrit for more details on how Gerrit is used in Eclipse, https://wiki.eclipse.org/Gerrit#Gerrit_Code_Review_Cheatsheet has some particularly useful information.

Git commit messages should follow the style described here;

http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html

Contact:
--------

Contact the project developers via the project's "dev" list.

- https://dev.eclipse.org/mailman/listinfo/paho-dev

Search for bugs:
----------------

This project uses Bugzilla to track ongoing development and issues.

- https://bugs.eclipse.org/bugs/buglist.cgi?product=Paho&component=MQTT-Go

Create a new bug:
-----------------

Be sure to search for existing bugs before you create another one. Remember that contributions are always welcome!

- https://bugs.eclipse.org/bugs/enter_bug.cgi?product=Paho
@@ -1,15 +0,0 @@

Eclipse Distribution License - v 1.0

Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.

All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -1,87 +0,0 @@
Eclipse Public License - v 1.0

THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.

1. DEFINITIONS

"Contribution" means:

a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and

b) in the case of each subsequent Contributor:

i) changes to the Program, and

ii) additions to the Program;

where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program.

"Contributor" means any person or entity that distributes the Program.

"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.

"Program" means the Contributions distributed in accordance with this Agreement.

"Recipient" means anyone who receives the Program under this Agreement, including all Contributors.

2. GRANT OF RIGHTS

a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form.

b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.

c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.

d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.

3. REQUIREMENTS

A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that:

a) it complies with the terms and conditions of this Agreement; and

b) its license agreement:

i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;

ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;

iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and

iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange.

When the Program is made available in source code form:

a) it must be made available under this Agreement; and

b) a copy of this Agreement must be included with each copy of the Program.

Contributors may not remove or alter any copyright notices contained within the Program.

Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution.

4. COMMERCIAL DISTRIBUTION

Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.

For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.

5. NO WARRANTY

EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.

6. DISCLAIMER OF LIABILITY

EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

7. GENERAL

If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.

If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.

All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.

Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.

This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.
@@ -1,62 +0,0 @@
Eclipse Paho MQTT Go client
===========================


This repository contains the source code for the [Eclipse Paho](http://eclipse.org/paho) MQTT Go client library.

This code builds a library which enable applications to connect to an [MQTT](http://mqtt.org) broker to publish messages, and to subscribe to topics and receive published messages.

This library supports a fully asynchronous mode of operation.


Installation and Build
----------------------

This client is designed to work with the standard Go tools, so installation is as easy as:

```
go get git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git
```

The client depends on Google's [websockets](http://godoc.org/code.google.com/p/go.net/websocket) package,
also easily installed with the command:

```
go get code.google.com/p/go.net/websocket
```


Usage and API
-------------

Detailed API documentation is available by using to godoc tool, or can be browsed online
using the [godoc.org](http://godoc.org/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git) service.

Make use of the library by importing it in your Go client source code. For example,
```
import MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
```

Samples are available in the `/samples` directory for reference.


Runtime tracing
---------------

Tracing is enabled by assigning logs (from the Go log package) to the logging endpoints, ERROR, CRITICAL, WARN and DEBUG


Reporting bugs
--------------

Please report bugs under the "MQTT-Go" Component in [Eclipse Bugzilla](http://bugs.eclipse.org/bugs/) for the Paho Technology project. This is a very new library as of Q1 2014, so there are sure to be bugs.


More information
----------------

Discussion of the Paho clients takes place on the [Eclipse paho-dev mailing list](https://dev.eclipse.org/mailman/listinfo/paho-dev).

General questions about the MQTT protocol are discussed in the [MQTT Google Group](https://groups.google.com/forum/?hl=en-US&fromgroups#!forum/mqtt).

There is much more information available via the [MQTT community site](http://mqtt.org).
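Aside (not part of the removed README): a minimal connect-and-publish sketch against the client API that appears in the client.go removed later in this commit. NewClientOptions and AddBroker are assumed from the upstream Paho options API (they are not shown in this diff), and the broker address and topic are placeholders.

```go
package main

import (
	"fmt"

	MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

func main() {
	// NewClientOptions/AddBroker are part of the library's options API, which is
	// not included in this diff; the broker URL below is a placeholder.
	opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883")

	// NewClient, Connect, Publish and Disconnect match the signatures in the
	// client.go shown later in this commit; Wait/Error are the Token helpers
	// from the same library.
	c := MQTT.NewClient(opts)
	if token := c.Connect(); token.Wait() && token.Error() != nil {
		fmt.Println("connect failed:", token.Error())
		return
	}

	token := c.Publish("example/topic", 0, false, "hello")
	token.Wait()

	c.Disconnect(250)
}
```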
@@ -1,41 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml"><head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>About</title>
</head>
<body lang="EN-US">
<h2>About This Content</h2>

<p><em>December 9, 2013</em></p>
<h3>License</h3>

<p>The Eclipse Foundation makes available all content in this plug-in ("Content"). Unless otherwise
indicated below, the Content is provided to you under the terms and conditions of the
Eclipse Public License Version 1.0 ("EPL") and Eclipse Distribution License Version 1.0 ("EDL").
A copy of the EPL is available at
<a href="http://www.eclipse.org/legal/epl-v10.html">http://www.eclipse.org/legal/epl-v10.html</a>
and a copy of the EDL is available at
<a href="http://www.eclipse.org/org/documents/edl-v10.php">http://www.eclipse.org/org/documents/edl-v10.php</a>.
For purposes of the EPL, "Program" will mean the Content.</p>

<p>If you did not receive this Content directly from the Eclipse Foundation, the Content is
being redistributed by another party ("Redistributor") and different terms and conditions may
apply to your use of any object code in the Content. Check the Redistributor's license that was
provided with the Content. If no such license exists, contact the Redistributor. Unless otherwise
indicated below, the terms and conditions of the EPL still apply to any source code in the Content
and such source code may be obtained at <a href="http://www.eclipse.org/">http://www.eclipse.org</a>.</p>


<h3>Third Party Content</h3>
<p>The Content includes items that have been sourced from third parties as set out below. If you
did not receive this Content directly from the Eclipse Foundation, the following is provided
for informational purposes only, and you should look to the Redistributor's license for
terms and conditions of use.</p>
<p><em>
<strong>None</strong> <br><br>
<br><br>
</em></p>


</body></html>
@ -1,517 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
// Package mqtt provides an MQTT v3.1.1 client library.
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ClientInt is the interface definition for a Client as used by this
|
||||
// library, the interface is primarily to allow mocking tests.
|
||||
type ClientInt interface {
|
||||
IsConnected() bool
|
||||
Connect() Token
|
||||
Disconnect(uint)
|
||||
disconnect()
|
||||
Publish(string, byte, bool, interface{}) Token
|
||||
Subscribe(string, byte, MessageHandler) Token
|
||||
SubscribeMultiple(map[string]byte, MessageHandler) Token
|
||||
Unsubscribe(...string) Token
|
||||
}
|
||||
|
||||
// Client is an MQTT v3.1.1 client for communicating
|
||||
// with an MQTT server using non-blocking methods that allow work
|
||||
// to be done in the background.
|
||||
// An application may connect to an MQTT server using:
|
||||
// A plain TCP socket
|
||||
// A secure SSL/TLS socket
|
||||
// A websocket
|
||||
// To enable ensured message delivery at Quality of Service (QoS) levels
|
||||
// described in the MQTT spec, a message persistence mechanism must be
|
||||
// used. This is done by providing a type which implements the Store
|
||||
// interface. For convenience, FileStore and MemoryStore are provided
|
||||
// implementations that should be sufficient for most use cases. More
|
||||
// information can be found in their respective documentation.
|
||||
// Numerous connection options may be specified by configuring a
|
||||
// and then supplying a ClientOptions type.
|
||||
type Client struct {
|
||||
sync.RWMutex
|
||||
messageIds
|
||||
conn net.Conn
|
||||
ibound chan packets.ControlPacket
|
||||
obound chan *PacketAndToken
|
||||
oboundP chan *PacketAndToken
|
||||
msgRouter *router
|
||||
stopRouter chan bool
|
||||
incomingPubChan chan *packets.PublishPacket
|
||||
errors chan error
|
||||
stop chan struct{}
|
||||
persist Store
|
||||
options ClientOptions
|
||||
lastContact lastcontact
|
||||
pingOutstanding bool
|
||||
connected bool
|
||||
workers sync.WaitGroup
|
||||
}
|
||||
|
||||
// NewClient will create an MQTT v3.1.1 client with all of the options specified
|
||||
// in the provided ClientOptions. The client must have the Start method called
|
||||
// on it before it may be used. This is to make sure resources (such as a net
|
||||
// connection) are created before the application is actually ready.
|
||||
func NewClient(o *ClientOptions) *Client {
|
||||
c := &Client{}
|
||||
c.options = *o
|
||||
|
||||
if c.options.Store == nil {
|
||||
c.options.Store = NewMemoryStore()
|
||||
}
|
||||
switch c.options.ProtocolVersion {
|
||||
case 3, 4:
|
||||
c.options.protocolVersionExplicit = true
|
||||
default:
|
||||
c.options.ProtocolVersion = 4
|
||||
c.options.protocolVersionExplicit = false
|
||||
}
|
||||
c.persist = c.options.Store
|
||||
c.connected = false
|
||||
c.messageIds = messageIds{index: make(map[uint16]Token)}
|
||||
c.msgRouter, c.stopRouter = newRouter()
|
||||
c.msgRouter.setDefaultHandler(c.options.DefaultPublishHander)
|
||||
return c
|
||||
}
|
||||
|
||||
// IsConnected returns a bool signifying whether
|
||||
// the client is connected or not.
|
||||
func (c *Client) IsConnected() bool {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
return c.connected
|
||||
}
|
||||
|
||||
func (c *Client) setConnected(status bool) {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.connected = status
|
||||
}
|
||||
|
||||
//ErrNotConnected is the error returned from function calls that are
|
||||
//made when the client is not connected to a broker
|
||||
var ErrNotConnected = errors.New("Not Connected")
|
||||
|
||||
// Connect will create a connection to the message broker
|
||||
// If clean session is false, then a slice will
|
||||
// be returned containing Receipts for all messages
|
||||
// that were in-flight at the last disconnect.
|
||||
// If clean session is true, then any existing client
|
||||
// state will be removed.
|
||||
func (c *Client) Connect() Token {
|
||||
var err error
|
||||
t := newToken(packets.Connect).(*ConnectToken)
|
||||
DEBUG.Println(CLI, "Connect()")
|
||||
|
||||
go func() {
|
||||
var rc byte
|
||||
cm := newConnectMsgFromOptions(&c.options)
|
||||
|
||||
for _, broker := range c.options.Servers {
|
||||
CONN:
|
||||
DEBUG.Println(CLI, "about to write new connect msg")
|
||||
c.conn, err = openConnection(broker, &c.options.TLSConfig, c.options.ConnectTimeout)
|
||||
if err == nil {
|
||||
DEBUG.Println(CLI, "socket connected to broker")
|
||||
switch c.options.ProtocolVersion {
|
||||
case 3:
|
||||
DEBUG.Println(CLI, "Using MQTT 3.1 protocol")
|
||||
cm.ProtocolName = "MQIsdp"
|
||||
cm.ProtocolVersion = 3
|
||||
default:
|
||||
DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol")
|
||||
c.options.ProtocolVersion = 4
|
||||
cm.ProtocolName = "MQTT"
|
||||
cm.ProtocolVersion = 4
|
||||
}
|
||||
cm.Write(c.conn)
|
||||
|
||||
rc = c.connect()
|
||||
if rc != packets.Accepted {
|
||||
c.conn.Close()
|
||||
c.conn = nil
|
||||
//if the protocol version was explicitly set don't do any fallback
|
||||
if c.options.protocolVersionExplicit {
|
||||
ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not CONN_ACCEPTED, but rather", packets.ConnackReturnCodes[rc])
|
||||
continue
|
||||
}
|
||||
if c.options.ProtocolVersion == 4 {
|
||||
DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol")
|
||||
c.options.ProtocolVersion = 3
|
||||
goto CONN
|
||||
}
|
||||
}
|
||||
break
|
||||
} else {
|
||||
ERROR.Println(CLI, err.Error())
|
||||
WARN.Println(CLI, "failed to connect to broker, trying next")
|
||||
rc = packets.ErrNetworkError
|
||||
}
|
||||
}
|
||||
|
||||
if c.conn == nil {
|
||||
ERROR.Println(CLI, "Failed to connect to a broker")
|
||||
t.returnCode = rc
|
||||
if rc != packets.ErrNetworkError {
|
||||
t.err = packets.ConnErrors[rc]
|
||||
} else {
|
||||
t.err = fmt.Errorf("%s : %s", packets.ConnErrors[rc], err)
|
||||
}
|
||||
t.flowComplete()
|
||||
return
|
||||
}
|
||||
|
||||
c.lastContact.update()
|
||||
c.persist.Open()
|
||||
|
||||
c.obound = make(chan *PacketAndToken, 100)
|
||||
c.oboundP = make(chan *PacketAndToken, 100)
|
||||
c.ibound = make(chan packets.ControlPacket)
|
||||
c.errors = make(chan error)
|
||||
c.stop = make(chan struct{})
|
||||
|
||||
c.incomingPubChan = make(chan *packets.PublishPacket, 100)
|
||||
c.msgRouter.matchAndDispatch(c.incomingPubChan, c.options.Order, c)
|
||||
|
||||
c.workers.Add(1)
|
||||
go outgoing(c)
|
||||
go alllogic(c)
|
||||
|
||||
c.connected = true
|
||||
DEBUG.Println(CLI, "client is connected")
|
||||
if c.options.OnConnect != nil {
|
||||
go c.options.OnConnect(c)
|
||||
}
|
||||
|
||||
if c.options.KeepAlive != 0 {
|
||||
c.workers.Add(1)
|
||||
go keepalive(c)
|
||||
}
|
||||
|
||||
// Take care of any messages in the store
|
||||
//var leftovers []Receipt
|
||||
if c.options.CleanSession == false {
|
||||
//leftovers = c.resume()
|
||||
} else {
|
||||
c.persist.Reset()
|
||||
}
|
||||
|
||||
// Do not start incoming until resume has completed
|
||||
c.workers.Add(1)
|
||||
go incoming(c)
|
||||
|
||||
DEBUG.Println(CLI, "exit startClient")
|
||||
t.flowComplete()
|
||||
}()
|
||||
return t
|
||||
}
|
||||
|
||||
// internal function used to reconnect the client when it loses its connection
|
||||
func (c *Client) reconnect() {
|
||||
DEBUG.Println(CLI, "enter reconnect")
|
||||
var rc byte = 1
|
||||
var sleep uint = 1
|
||||
var err error
|
||||
|
||||
for rc != 0 {
|
||||
cm := newConnectMsgFromOptions(&c.options)
|
||||
|
||||
for _, broker := range c.options.Servers {
|
||||
CONN:
|
||||
DEBUG.Println(CLI, "about to write new connect msg")
|
||||
c.conn, err = openConnection(broker, &c.options.TLSConfig, c.options.ConnectTimeout)
|
||||
if err == nil {
|
||||
DEBUG.Println(CLI, "socket connected to broker")
|
||||
switch c.options.ProtocolVersion {
|
||||
case 3:
|
||||
DEBUG.Println(CLI, "Using MQTT 3.1 protocol")
|
||||
cm.ProtocolName = "MQIsdp"
|
||||
cm.ProtocolVersion = 3
|
||||
default:
|
||||
DEBUG.Println(CLI, "Using MQTT 3.1.1 protocol")
|
||||
c.options.ProtocolVersion = 4
|
||||
cm.ProtocolName = "MQTT"
|
||||
cm.ProtocolVersion = 4
|
||||
}
|
||||
cm.Write(c.conn)
|
||||
|
||||
rc = c.connect()
|
||||
if rc != packets.Accepted {
|
||||
c.conn.Close()
|
||||
c.conn = nil
|
||||
//if the protocol version was explicitly set don't do any fallback
|
||||
if c.options.protocolVersionExplicit {
|
||||
ERROR.Println(CLI, "Connecting to", broker, "CONNACK was not Accepted, but rather", packets.ConnackReturnCodes[rc])
|
||||
continue
|
||||
}
|
||||
if c.options.ProtocolVersion == 4 {
|
||||
DEBUG.Println(CLI, "Trying reconnect using MQTT 3.1 protocol")
|
||||
c.options.ProtocolVersion = 3
|
||||
goto CONN
|
||||
}
|
||||
}
|
||||
break
|
||||
} else {
|
||||
ERROR.Println(CLI, err.Error())
|
||||
WARN.Println(CLI, "failed to connect to broker, trying next")
|
||||
rc = packets.ErrNetworkError
|
||||
}
|
||||
}
|
||||
if rc != 0 {
|
||||
DEBUG.Println(CLI, "Reconnect failed, sleeping for", sleep, "seconds")
|
||||
time.Sleep(time.Duration(sleep) * time.Second)
|
||||
if sleep <= uint(c.options.MaxReconnectInterval.Seconds()) {
|
||||
sleep *= 2
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
c.lastContact.update()
|
||||
c.stop = make(chan struct{})
|
||||
|
||||
c.workers.Add(1)
|
||||
go outgoing(c)
|
||||
go alllogic(c)
|
||||
|
||||
c.setConnected(true)
|
||||
DEBUG.Println(CLI, "client is reconnected")
|
||||
if c.options.OnConnect != nil {
|
||||
go c.options.OnConnect(c)
|
||||
}
|
||||
|
||||
if c.options.KeepAlive != 0 {
|
||||
c.workers.Add(1)
|
||||
go keepalive(c)
|
||||
}
|
||||
c.workers.Add(1)
|
||||
go incoming(c)
|
||||
}
|
||||
|
||||
// This function is only used for receiving a connack
|
||||
// when the connection is first started.
|
||||
// This prevents receiving incoming data while resume
|
||||
// is in progress if clean session is false.
|
||||
func (c *Client) connect() byte {
|
||||
DEBUG.Println(NET, "connect started")
|
||||
|
||||
ca, err := packets.ReadPacket(c.conn)
|
||||
if err != nil {
|
||||
ERROR.Println(NET, "connect got error", err)
|
||||
//c.errors <- err
|
||||
return packets.ErrNetworkError
|
||||
}
|
||||
msg := ca.(*packets.ConnackPacket)
|
||||
|
||||
if msg == nil || msg.FixedHeader.MessageType != packets.Connack {
|
||||
ERROR.Println(NET, "received msg that was nil or not CONNACK")
|
||||
} else {
|
||||
DEBUG.Println(NET, "received connack")
|
||||
}
|
||||
return msg.ReturnCode
|
||||
}
|
||||
|
||||
// Disconnect will end the connection with the server, but not before waiting
|
||||
// the specified number of milliseconds to wait for existing work to be
|
||||
// completed.
|
||||
func (c *Client) Disconnect(quiesce uint) {
|
||||
if !c.IsConnected() {
|
||||
WARN.Println(CLI, "already disconnected")
|
||||
return
|
||||
}
|
||||
DEBUG.Println(CLI, "disconnecting")
|
||||
c.setConnected(false)
|
||||
|
||||
dm := packets.NewControlPacket(packets.Disconnect).(*packets.DisconnectPacket)
|
||||
dt := newToken(packets.Disconnect)
|
||||
c.oboundP <- &PacketAndToken{p: dm, t: dt}
|
||||
|
||||
// wait for work to finish, or quiesce time consumed
|
||||
dt.WaitTimeout(time.Duration(quiesce) * time.Millisecond)
|
||||
c.disconnect()
|
||||
}
|
||||
|
||||
// ForceDisconnect will end the connection with the mqtt broker immediately.
|
||||
func (c *Client) forceDisconnect() {
|
||||
if !c.IsConnected() {
|
||||
WARN.Println(CLI, "already disconnected")
|
||||
return
|
||||
}
|
||||
c.setConnected(false)
|
||||
c.conn.Close()
|
||||
DEBUG.Println(CLI, "forcefully disconnecting")
|
||||
c.disconnect()
|
||||
}
|
||||
|
||||
func (c *Client) internalConnLost(err error) {
|
||||
close(c.stop)
|
||||
c.conn.Close()
|
||||
c.workers.Wait()
|
||||
if c.IsConnected() {
|
||||
if c.options.OnConnectionLost != nil {
|
||||
go c.options.OnConnectionLost(c, err)
|
||||
}
|
||||
if c.options.AutoReconnect {
|
||||
go c.reconnect()
|
||||
} else {
|
||||
c.setConnected(false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) disconnect() {
|
||||
select {
|
||||
case <-c.stop:
|
||||
//someone else has already closed the channel, must be error
|
||||
default:
|
||||
close(c.stop)
|
||||
}
|
||||
c.conn.Close()
|
||||
c.workers.Wait()
|
||||
close(c.stopRouter)
|
||||
DEBUG.Println(CLI, "disconnected")
|
||||
c.persist.Close()
|
||||
}
|
||||
|
||||
// Publish will publish a message with the specified QoS
|
||||
// and content to the specified topic.
|
||||
// Returns a read only channel used to track
|
||||
// the delivery of the message.
|
||||
func (c *Client) Publish(topic string, qos byte, retained bool, payload interface{}) Token {
|
||||
token := newToken(packets.Publish).(*PublishToken)
|
||||
DEBUG.Println(CLI, "enter Publish")
|
||||
	if !c.IsConnected() {
		token.err = ErrNotConnected
		token.flowComplete()
		return token
	}
	pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	pub.Qos = qos
	pub.TopicName = topic
	pub.Retain = retained
	switch payload.(type) {
	case string:
		pub.Payload = []byte(payload.(string))
	case []byte:
		pub.Payload = payload.([]byte)
	default:
		token.err = errors.New("Unknown payload type")
		token.flowComplete()
		return token
	}

	DEBUG.Println(CLI, "sending publish message, topic:", topic)
	c.obound <- &PacketAndToken{p: pub, t: token}
	return token
}

// Subscribe starts a new subscription. Provide a MessageHandler to be executed when
// a message is published on the topic provided.
func (c *Client) Subscribe(topic string, qos byte, callback MessageHandler) Token {
	token := newToken(packets.Subscribe).(*SubscribeToken)
	DEBUG.Println(CLI, "enter Subscribe")
	if !c.IsConnected() {
		token.err = ErrNotConnected
		token.flowComplete()
		return token
	}
	sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
	if err := validateTopicAndQos(topic, qos); err != nil {
		token.err = err
		return token
	}
	sub.Topics = append(sub.Topics, topic)
	sub.Qoss = append(sub.Qoss, qos)
	DEBUG.Println(sub.String())

	if callback != nil {
		c.msgRouter.addRoute(topic, callback)
	}

	token.subs = append(token.subs, topic)
	c.oboundP <- &PacketAndToken{p: sub, t: token}
	DEBUG.Println(CLI, "exit Subscribe")
	return token
}

// SubscribeMultiple starts a new subscription for multiple topics. Provide a MessageHandler to
// be executed when a message is published on one of the topics provided.
func (c *Client) SubscribeMultiple(filters map[string]byte, callback MessageHandler) Token {
	var err error
	token := newToken(packets.Subscribe).(*SubscribeToken)
	DEBUG.Println(CLI, "enter SubscribeMultiple")
	if !c.IsConnected() {
		token.err = ErrNotConnected
		token.flowComplete()
		return token
	}
	sub := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
	if sub.Topics, sub.Qoss, err = validateSubscribeMap(filters); err != nil {
		token.err = err
		return token
	}

	if callback != nil {
		for topic := range filters {
			c.msgRouter.addRoute(topic, callback)
		}
	}
	token.subs = make([]string, len(sub.Topics))
	copy(token.subs, sub.Topics)
	c.oboundP <- &PacketAndToken{p: sub, t: token}
	DEBUG.Println(CLI, "exit SubscribeMultiple")
	return token
}

// Unsubscribe will end the subscription from each of the topics provided.
// Messages published to those topics from other clients will no longer be
// received.
func (c *Client) Unsubscribe(topics ...string) Token {
	token := newToken(packets.Unsubscribe).(*UnsubscribeToken)
	DEBUG.Println(CLI, "enter Unsubscribe")
	if !c.IsConnected() {
		token.err = ErrNotConnected
		token.flowComplete()
		return token
	}
	unsub := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket)
	unsub.Topics = make([]string, len(topics))
	copy(unsub.Topics, topics)

	c.oboundP <- &PacketAndToken{p: unsub, t: token}
	for _, topic := range topics {
		c.msgRouter.deleteRoute(topic)
	}

	DEBUG.Println(CLI, "exit Unsubscribe")
	return token
}

// DefaultConnectionLostHandler is a definition of a function that simply
// reports to the DEBUG log the reason for the client losing a connection.
func DefaultConnectionLostHandler(client *Client, reason error) {
	DEBUG.Println("Connection lost:", reason.Error())
}
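For reference while reading these methods, a minimal round trip through this API looks roughly like the sketch below. NewClientOptions, NewClient, Connect and Disconnect live in the client.go/options.go diffs that are suppressed later in this commit, and the handler signature and Publish argument order are likewise assumptions about this vendored snapshot rather than something shown above.

package main

import (
	"fmt"

	mqtt "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
)

func main() {
	// Assumed constructor/connect API from the suppressed files.
	opts := mqtt.NewClientOptions().AddBroker("tcp://iot.eclipse.org:1883")
	c := mqtt.NewClient(opts)
	if t := c.Connect(); t.Wait() && t.Error() != nil {
		panic(t.Error())
	}

	// Subscribe registers the callback with the message router before the
	// SUBSCRIBE packet is queued on oboundP (see Subscribe above).
	c.Subscribe("telegraf/#", 1, func(client *mqtt.Client, msg mqtt.Message) {
		fmt.Printf("%s: %s\n", msg.Topic(), msg.Payload())
	}).Wait()

	// Publish accepts string or []byte payloads; any other type sets the
	// "Unknown payload type" error on the returned token.
	c.Publish("telegraf/demo", 1, false, "hello").Wait()

	c.Unsubscribe("telegraf/#").Wait()
	c.Disconnect(250)
}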
@ -1,31 +0,0 @@
/*
 * Copyright (c) 2013 IBM Corp.
 *
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *    Seth Hoenig
 *    Allan Stockdill-Mander
 *    Mike Robertson
 */

package mqtt

type component string

// Component names for debug output
const (
	NET component = "[net] "
	PNG component = "[pinger] "
	CLI component = "[client] "
	DEC component = "[decode] "
	MES component = "[message] "
	STR component = "[store] "
	MID component = "[msgids] "
	TST component = "[test] "
	STA component = "[state] "
	ERR component = "[error] "
)
|
|
@ -1,15 +0,0 @@
|
|||
|
||||
Eclipse Distribution License - v 1.0
|
||||
|
||||
Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
@ -1,70 +0,0 @@
|
|||
Eclipse Public License - v 1.0
|
||||
|
||||
THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
|
||||
|
||||
1. DEFINITIONS
|
||||
|
||||
"Contribution" means:
|
||||
|
||||
a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and
|
||||
b) in the case of each subsequent Contributor:
|
||||
i) changes to the Program, and
|
||||
ii) additions to the Program;
|
||||
where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program.
|
||||
"Contributor" means any person or entity that distributes the Program.
|
||||
|
||||
"Licensed Patents" mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.
|
||||
|
||||
"Program" means the Contributions distributed in accordance with this Agreement.
|
||||
|
||||
"Recipient" means anyone who receives the Program under this Agreement, including all Contributors.
|
||||
|
||||
2. GRANT OF RIGHTS
|
||||
|
||||
a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form.
|
||||
b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.
|
||||
c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.
|
||||
d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.
|
||||
3. REQUIREMENTS
|
||||
|
||||
A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that:
|
||||
|
||||
a) it complies with the terms and conditions of this Agreement; and
|
||||
b) its license agreement:
|
||||
i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;
|
||||
ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;
|
||||
iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and
|
||||
iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange.
|
||||
When the Program is made available in source code form:
|
||||
|
||||
a) it must be made available under this Agreement; and
|
||||
b) a copy of this Agreement must be included with each copy of the Program.
|
||||
Contributors may not remove or alter any copyright notices contained within the Program.
|
||||
|
||||
Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution.
|
||||
|
||||
4. COMMERCIAL DISTRIBUTION
|
||||
|
||||
Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.
|
||||
|
||||
For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.
|
||||
|
||||
5. NO WARRANTY
|
||||
|
||||
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement , including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.
|
||||
|
||||
6. DISCLAIMER OF LIABILITY
|
||||
|
||||
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
7. GENERAL
|
||||
|
||||
If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
|
||||
|
||||
If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.
|
||||
|
||||
All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.
|
||||
|
||||
Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.
|
||||
|
||||
This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.
|
|
@ -1,258 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
msgExt = ".msg"
|
||||
bkpExt = ".bkp"
|
||||
)
|
||||
|
||||
// FileStore implements the store interface using the filesystem to provide
|
||||
// true persistence, even across client failure. This is designed to use a
|
||||
// single directory per running client. If you are running multiple clients
|
||||
// on the same filesystem, you will need to be careful to specify unique
|
||||
// store directories for each.
|
||||
type FileStore struct {
|
||||
sync.RWMutex
|
||||
directory string
|
||||
opened bool
|
||||
}
|
||||
|
||||
// NewFileStore will create a new FileStore which stores its messages in the
|
||||
// directory provided.
|
||||
func NewFileStore(directory string) *FileStore {
|
||||
store := &FileStore{
|
||||
directory: directory,
|
||||
opened: false,
|
||||
}
|
||||
return store
|
||||
}
|
||||
|
||||
// Open will allow the FileStore to be used.
|
||||
func (store *FileStore) Open() {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
// if no store directory was specified in ClientOpts, by default use the
|
||||
// current working directory
|
||||
if store.directory == "" {
|
||||
store.directory, _ = os.Getwd()
|
||||
}
|
||||
|
||||
// if store dir exists, great, otherwise, create it
|
||||
if !exists(store.directory) {
|
||||
perms := os.FileMode(0770)
|
||||
merr := os.MkdirAll(store.directory, perms)
|
||||
chkerr(merr)
|
||||
}
|
||||
store.opened = true
|
||||
DEBUG.Println(STR, "store is opened at", store.directory)
|
||||
}
|
||||
|
||||
// Close will disallow the FileStore from being used.
|
||||
func (store *FileStore) Close() {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
store.opened = false
|
||||
WARN.Println(STR, "store is not open")
|
||||
}
|
||||
|
||||
// Put will put a message into the store, associated with the provided
|
||||
// key value.
|
||||
func (store *FileStore) Put(key string, m packets.ControlPacket) {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
chkcond(store.opened)
|
||||
full := fullpath(store.directory, key)
|
||||
if exists(full) {
|
||||
backup(store.directory, key) // make a copy of what already exists
|
||||
defer unbackup(store.directory, key)
|
||||
}
|
||||
write(store.directory, key, m)
|
||||
chkcond(exists(full))
|
||||
}
|
||||
|
||||
// Get will retrieve a message from the store, the one associated with
|
||||
// the provided key value.
|
||||
func (store *FileStore) Get(key string) packets.ControlPacket {
|
||||
store.RLock()
|
||||
defer store.RUnlock()
|
||||
chkcond(store.opened)
|
||||
filepath := fullpath(store.directory, key)
|
||||
if !exists(filepath) {
|
||||
return nil
|
||||
}
|
||||
mfile, oerr := os.Open(filepath)
|
||||
chkerr(oerr)
|
||||
//all, rerr := ioutil.ReadAll(mfile)
|
||||
//chkerr(rerr)
|
||||
msg, rerr := packets.ReadPacket(mfile)
|
||||
chkerr(rerr)
|
||||
cerr := mfile.Close()
|
||||
chkerr(cerr)
|
||||
return msg
|
||||
}
|
||||
|
||||
// All will provide a list of all of the keys associated with messages
|
||||
// currently residing in the FileStore.
|
||||
func (store *FileStore) All() []string {
|
||||
store.RLock()
|
||||
defer store.RUnlock()
|
||||
return store.all()
|
||||
}
|
||||
|
||||
// Del will remove the persisted message associated with the provided
|
||||
// key from the FileStore.
|
||||
func (store *FileStore) Del(key string) {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
store.del(key)
|
||||
}
|
||||
|
||||
// Reset will remove all persisted messages from the FileStore.
|
||||
func (store *FileStore) Reset() {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
WARN.Println(STR, "FileStore Reset")
|
||||
for _, key := range store.all() {
|
||||
store.del(key)
|
||||
}
|
||||
}
|
||||
|
||||
// lockless
|
||||
func (store *FileStore) all() []string {
|
||||
chkcond(store.opened)
|
||||
keys := []string{}
|
||||
files, rderr := ioutil.ReadDir(store.directory)
|
||||
chkerr(rderr)
|
||||
for _, f := range files {
|
||||
DEBUG.Println(STR, "file in All():", f.Name())
|
||||
key := f.Name()[0 : len(f.Name())-4] // remove file extension
|
||||
keys = append(keys, key)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// lockless
|
||||
func (store *FileStore) del(key string) {
|
||||
chkcond(store.opened)
|
||||
DEBUG.Println(STR, "store del filepath:", store.directory)
|
||||
DEBUG.Println(STR, "store delete key:", key)
|
||||
filepath := fullpath(store.directory, key)
|
||||
DEBUG.Println(STR, "path of deletion:", filepath)
|
||||
if !exists(filepath) {
|
||||
WARN.Println(STR, "store could not delete key:", key)
|
||||
return
|
||||
}
|
||||
rerr := os.Remove(filepath)
|
||||
chkerr(rerr)
|
||||
DEBUG.Println(STR, "del msg:", key)
|
||||
chkcond(!exists(filepath))
|
||||
}
|
||||
|
||||
func fullpath(store string, key string) string {
|
||||
p := path.Join(store, key+msgExt)
|
||||
return p
|
||||
}
|
||||
|
||||
func bkppath(store string, key string) string {
|
||||
p := path.Join(store, key+bkpExt)
|
||||
return p
|
||||
}
|
||||
|
||||
// create file called "X.[messageid].msg" located in the store
|
||||
// the contents of the file is the bytes of the message
|
||||
// if a message with m's message id already exists, it will
|
||||
// be overwritten
|
||||
// X will be 'i' for inbound messages, and O for outbound messages
|
||||
func write(store, key string, m packets.ControlPacket) {
|
||||
filepath := fullpath(store, key)
|
||||
f, err := os.Create(filepath)
|
||||
chkerr(err)
|
||||
werr := m.Write(f)
|
||||
chkerr(werr)
|
||||
cerr := f.Close()
|
||||
chkerr(cerr)
|
||||
}
|
||||
|
||||
func exists(file string) bool {
|
||||
if _, err := os.Stat(file); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false
|
||||
}
|
||||
chkerr(err)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func backup(store, key string) {
|
||||
bkpp := bkppath(store, key)
|
||||
fulp := fullpath(store, key)
|
||||
backup, err := os.Create(bkpp)
|
||||
chkerr(err)
|
||||
mfile, oerr := os.Open(fulp)
|
||||
chkerr(oerr)
|
||||
_, cerr := io.Copy(backup, mfile)
|
||||
chkerr(cerr)
|
||||
clberr := backup.Close()
|
||||
chkerr(clberr)
|
||||
clmerr := mfile.Close()
|
||||
chkerr(clmerr)
|
||||
}
|
||||
|
||||
// Identify .bkp files in the store and turn them into .msg files,
|
||||
// whether or not it overwrites an existing file. This is safe because
|
||||
// I'm copying the Paho Java client and they say it is.
|
||||
func restore(store string) {
|
||||
files, rderr := ioutil.ReadDir(store)
|
||||
chkerr(rderr)
|
||||
for _, f := range files {
|
||||
fname := f.Name()
|
||||
if len(fname) > 4 {
|
||||
if fname[len(fname)-4:] == bkpExt {
|
||||
key := fname[0 : len(fname)-4]
|
||||
fulp := fullpath(store, key)
|
||||
msg, cerr := os.Create(fulp)
|
||||
chkerr(cerr)
|
||||
bkpp := path.Join(store, fname)
|
||||
bkp, oerr := os.Open(bkpp)
|
||||
chkerr(oerr)
|
||||
n, cerr := io.Copy(msg, bkp)
|
||||
chkerr(cerr)
|
||||
chkcond(n > 0)
|
||||
clmerr := msg.Close()
|
||||
chkerr(clmerr)
|
||||
clberr := bkp.Close()
|
||||
chkerr(clberr)
|
||||
remerr := os.Remove(bkpp)
|
||||
chkerr(remerr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func unbackup(store, key string) {
|
||||
bkpp := bkppath(store, key)
|
||||
remerr := os.Remove(bkpp)
|
||||
chkerr(remerr)
|
||||
}
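For reference, the whole FileStore lifecycle defined above can be exercised through its exported methods alone; a minimal in-package sketch follows (the directory, key and payload are made up for the example).

func exampleFileStore() {
	store := NewFileStore("/tmp/telegraf-mqtt-store") // one directory per running client
	store.Open()
	defer store.Close()

	pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
	pub.Qos = 1
	pub.TopicName = "a/b/c"
	pub.Payload = []byte("hello")
	pub.MessageID = 1

	store.Put("o.1", pub) // written to /tmp/telegraf-mqtt-store/o.1.msg
	_ = store.Get("o.1")  // read back and decoded with packets.ReadPacket
	store.Del("o.1")      // removes the .msg file again
}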
|
|
@ -1,74 +0,0 @@
|
|||
FVT Instructions
|
||||
================
|
||||
|
||||
The FVT tests are currently only supported by [IBM MessageSight](http://www-03.ibm.com/software/products/us/en/messagesight/).
|
||||
|
||||
Support for [mosquitto](http://mosquitto.org/) and [IBM Really Small Message Broker](https://www.ibm.com/developerworks/community/groups/service/html/communityview?communityUuid=d5bedadd-e46f-4c97-af89-22d65ffee070) might be added in the future.
|
||||
|
||||
|
||||
IBM MessageSight Configuration
|
||||
------------------------------
|
||||
|
||||
The IBM MessageSight Virtual Appliance can be downloaded here:
|
||||
[Download](http://www-933.ibm.com/support/fixcentral/swg/selectFixes?parent=ibm~Other+software&product=ibm/Other+software/MessageSight&function=fixId&fixids=1.0.0.1-IMA-DeveloperImage&includeSupersedes=0 "IBM MessageSight")
|
||||
|
||||
There is a nice blog post about it here:
|
||||
[Blog](https://www.ibm.com/developerworks/community/blogs/c565c720-fe84-4f63-873f-607d87787327/entry/ibm_messagesight_for_developers_is_here?lang=en "Blog")
|
||||
|
||||
|
||||
The virtual appliance must be installed into a virtual machine like
|
||||
Oracle VirtualBox or VMWare Player. (Follow the instructions that come
|
||||
with the download).
|
||||
|
||||
Next, copy your authorized keys (basically a file containing the public
|
||||
rsa key of your own computer) onto the appliance to enable passwordless ssh.
|
||||
|
||||
For example,
|
||||
|
||||
Console> user sshkey add "scp://user@host:~/.ssh/authorized_keys"
|
||||
|
||||
More information can be found in the IBM MessageSight InfoCenter:
|
||||
[InfoCenter](https://infocenters.hursley.ibm.com/ism/v1/help/index.jsp "InfoCenter")
|
||||
|
||||
Now, execute the script setup_IMA.sh to create the objects necessary
|
||||
to configure the server for the unit test cases provided.
|
||||
|
||||
For example,
|
||||
|
||||
./setup_IMA.sh
|
||||
|
||||
You should now be able to view the objects on your server:
|
||||
|
||||
Console> imaserver show Endpoint Name=GoMqttEP1
|
||||
Name = GoMqttEP1
|
||||
Enabled = True
|
||||
Port = 17001
|
||||
Protocol = MQTT
|
||||
Interface = all
|
||||
SecurityProfile =
|
||||
ConnectionPolicies = GoMqttCP1
|
||||
MessagingPolicies = GoMqttMP1
|
||||
MaxMessageSize = 1024KB
|
||||
MessageHub = GoMqttTestHub
|
||||
Description =
|
||||
|
||||
|
||||
|
||||
RSMB Configuration
|
||||
------------------
|
||||
Wait for SSL support?
|
||||
|
||||
|
||||
Mosquitto Configuration
|
||||
-----------------------
|
||||
Launch mosquitto from the fvt directory, specifying mosquitto.cfg as the config file
|
||||
|
||||
``ex: /usr/bin/mosquitto -c ./mosquitto.cfg``
|
||||
|
||||
Note: Mosquitto requires SSL 1.1 or better, while Go 1.1.2 supports
|
||||
only SSL v1.0. However, Go 1.2+ supports SSL v1.1 and SSL v1.2.
|
||||
|
||||
|
||||
Other Notes
|
||||
-----------
|
||||
Go 1.1.2 does not support intermediate certificates, however Go 1.2+ does.
@ -1,17 +0,0 @@
allow_anonymous true
allow_duplicate_messages false
connection_messages true
log_dest stdout
log_timestamp true
log_type all
persistence false
bind_address 127.0.0.1

listener 17001
listener 17002
listener 17003
listener 17004

#capath ../samples/samplecerts
#certfile ../samples/samplecerts/server-crt.pem
#keyfile ../samples/samplecerts/server-key.pem
@ -1,8 +0,0 @@
allow_anonymous false
bind_address 127.0.0.1
connection_messages true
log_level detail

listener 17001
#listener 17003
#listener 17004
|
|
@ -1,111 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
#######################################################################
|
||||
# This script is for configuring your IBM Messaging Appliance for use #
|
||||
# as an mqtt test server for testing the go-mqtt open source client. #
|
||||
# It creates the Policies and Endpoints necessary to test particular #
|
||||
# features of the client, such as IPv6, SSL, and other things #
|
||||
# #
|
||||
# You do not need this script for any other purpose. #
|
||||
#######################################################################
|
||||
|
||||
# Edit options to match your configuration
|
||||
IMA_HOST=9.41.55.184
|
||||
IMA_USER=admin
|
||||
HOST=9.41.55.146
|
||||
USER=root
|
||||
CERTDIR=~/GO/src/github.com/shoenig/go-mqtt/samples/samplecerts
|
||||
|
||||
echo 'Configuring your IBM Messaging Appliance for testing go-mqtt'
|
||||
echo 'IMA_HOST: ' $IMA_HOST
|
||||
|
||||
|
||||
function ima {
|
||||
reply=`ssh $IMA_USER@$IMA_HOST imaserver $@`
|
||||
}
|
||||
|
||||
function imp {
|
||||
reply=`ssh $IMA_USER@$IMA_HOST file get $@`
|
||||
}
|
||||
|
||||
ima create MessageHub Name=GoMqttTestHub
|
||||
|
||||
# Config "1" is a basic, open endpoint, port 17001
|
||||
ima create MessagingPolicy \
|
||||
Name=GoMqttMP1 \
|
||||
Protocol=MQTT \
|
||||
ActionList=Publish,Subscribe \
|
||||
MaxMessages=100000 \
|
||||
DestinationType=Topic \
|
||||
Destination=*
|
||||
|
||||
ima create ConnectionPolicy \
|
||||
Name=GoMqttCP1 \
|
||||
Protocol=MQTT
|
||||
|
||||
ima create Endpoint \
|
||||
Name=GoMqttEP1 \
|
||||
Protocol=MQTT \
|
||||
MessageHub=GoMqttTestHub \
|
||||
ConnectionPolicies=GoMqttCP1 \
|
||||
MessagingPolicies=GoMqttMP1 \
|
||||
Port=17001
|
||||
|
||||
# Config "2" is IPv6 only , port 17002
|
||||
|
||||
# Config "3" is for authorization failures, port 17003
|
||||
ima create ConnectionPolicy \
|
||||
Name=GoMqttCP2 \
|
||||
Protocol=MQTT \
|
||||
ClientID=GoMqttClient
|
||||
|
||||
ima create Endpoint \
|
||||
Name=GoMqttEP3 \
|
||||
Protocol=MQTT \
|
||||
MessageHub=GoMqttTestHub \
|
||||
ConnectionPolicies=GoMqttCP2 \
|
||||
MessagingPolicies=GoMqttMP1 \
|
||||
Port=17003
|
||||
|
||||
# Config "4" is secure connections, port 17004
|
||||
imp scp://$USER@$HOST:${CERTDIR}/server-crt.pem .
|
||||
imp scp://$USER@$HOST:${CERTDIR}/server-key.pem .
|
||||
imp scp://$USER@$HOST:${CERTDIR}/rootCA-crt.pem .
|
||||
imp scp://$USER@$HOST:${CERTDIR}/intermediateCA-crt.pem .
|
||||
|
||||
ima apply Certificate \
|
||||
CertFileName=server-crt.pem \
|
||||
"CertFilePassword=" \
|
||||
KeyFileName=server-key.pem \
|
||||
"KeyFilePassword="
|
||||
|
||||
ima create CertificateProfile \
|
||||
Name=GoMqttCertProf \
|
||||
Certificate=server-crt.pem \
|
||||
Key=server-key.pem
|
||||
|
||||
ima create SecurityProfile \
|
||||
Name=GoMqttSecProf \
|
||||
MinimumProtocolMethod=SSLv3 \
|
||||
UseClientCertificate=True \
|
||||
UsePasswordAuthentication=False \
|
||||
Ciphers=Fast \
|
||||
CertificateProfile=GoMqttCertProf
|
||||
|
||||
ima apply Certificate \
|
||||
TrustedCertificate=rootCA-crt.pem \
|
||||
SecurityProfileName=GoMqttSecProf
|
||||
|
||||
ima apply Certificate \
|
||||
TrustedCertificate=intermediateCA-crt.pem \
|
||||
SecurityProfileName=GoMqttSecProf
|
||||
|
||||
ima create Endpoint \
|
||||
Name=GoMqttEP4 \
|
||||
Port=17004 \
|
||||
MessageHub=GoMqttTestHub \
|
||||
ConnectionPolicies=GoMqttCP1 \
|
||||
MessagingPolicies=GoMqttMP1 \
|
||||
SecurityProfile=GoMqttSecProf \
|
||||
Protocol=MQTT
|
||||
|
File diff suppressed because it is too large
|
@ -1,496 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
|
||||
"testing"
|
||||
)
|
||||
|
||||
/*******************************
|
||||
**** Some helper functions ****
|
||||
*******************************/
|
||||
|
||||
func b2s(bs []byte) string {
|
||||
s := ""
|
||||
for _, b := range bs {
|
||||
s += fmt.Sprintf("%x ", b)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
/**********************************************
|
||||
**** A mock store implementation for test ****
|
||||
**********************************************/
|
||||
|
||||
type TestStore struct {
|
||||
mput []uint16
|
||||
mget []uint16
|
||||
mdel []uint16
|
||||
}
|
||||
|
||||
func (ts *TestStore) Open() {
|
||||
}
|
||||
|
||||
func (ts *TestStore) Close() {
|
||||
}
|
||||
|
||||
func (ts *TestStore) Put(key string, m packets.ControlPacket) {
|
||||
ts.mput = append(ts.mput, m.Details().MessageID)
|
||||
}
|
||||
|
||||
func (ts *TestStore) Get(key string) packets.ControlPacket {
|
||||
mid := mIDFromKey(key)
|
||||
ts.mget = append(ts.mget, mid)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ts *TestStore) All() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ts *TestStore) Del(key string) {
|
||||
mid := mIDFromKey(key)
|
||||
ts.mdel = append(ts.mdel, mid)
|
||||
}
|
||||
|
||||
func (ts *TestStore) Reset() {
|
||||
}
|
||||
|
||||
/*******************
|
||||
**** FileStore ****
|
||||
*******************/
|
||||
|
||||
func Test_NewFileStore(t *testing.T) {
|
||||
storedir := "/tmp/TestStore/_new"
|
||||
f := NewFileStore(storedir)
|
||||
if f.opened {
|
||||
t.Fatalf("filestore was opened without opening it")
|
||||
}
|
||||
if f.directory != storedir {
|
||||
t.Fatalf("filestore directory is wrong")
|
||||
}
|
||||
// storedir might exist or might not, just like with a real client
|
||||
// the point is, we don't care, we just want it to exist after it is
|
||||
// opened
|
||||
}
|
||||
|
||||
func Test_FileStore_Open(t *testing.T) {
|
||||
storedir := "/tmp/TestStore/_open"
|
||||
|
||||
f := NewFileStore(storedir)
|
||||
f.Open()
|
||||
if !f.opened {
|
||||
t.Fatalf("filestore was not set open")
|
||||
}
|
||||
if f.directory != storedir {
|
||||
t.Fatalf("filestore directory is wrong")
|
||||
}
|
||||
if !exists(storedir) {
|
||||
t.Fatalf("filestore directory does not exst after opening it")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_FileStore_Close(t *testing.T) {
|
||||
storedir := "/tmp/TestStore/_unopen"
|
||||
f := NewFileStore(storedir)
|
||||
f.Open()
|
||||
if !f.opened {
|
||||
t.Fatalf("filestore was not set open")
|
||||
}
|
||||
if f.directory != storedir {
|
||||
t.Fatalf("filestore directory is wrong")
|
||||
}
|
||||
if !exists(storedir) {
|
||||
t.Fatalf("filestore directory does not exst after opening it")
|
||||
}
|
||||
|
||||
f.Close()
|
||||
if f.opened {
|
||||
t.Fatalf("filestore was still open after unopen")
|
||||
}
|
||||
if !exists(storedir) {
|
||||
t.Fatalf("filestore was deleted after unopen")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_FileStore_write(t *testing.T) {
|
||||
storedir := "/tmp/TestStore/_write"
|
||||
f := NewFileStore(storedir)
|
||||
f.Open()
|
||||
|
||||
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pm.Qos = 1
|
||||
pm.TopicName = "a/b/c"
|
||||
pm.Payload = []byte{0xBE, 0xEF, 0xED}
|
||||
pm.MessageID = 91
|
||||
|
||||
key := inboundKeyFromMID(pm.MessageID)
|
||||
f.Put(key, pm)
|
||||
|
||||
if !exists(storedir + "/i.91.msg") {
|
||||
t.Fatalf("message not in store")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func Test_FileStore_Get(t *testing.T) {
|
||||
storedir := "/tmp/TestStore/_get"
|
||||
f := NewFileStore(storedir)
|
||||
f.Open()
|
||||
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pm.Qos = 1
|
||||
pm.TopicName = "/a/b/c"
|
||||
pm.Payload = []byte{0xBE, 0xEF, 0xED}
|
||||
pm.MessageID = 120
|
||||
|
||||
key := outboundKeyFromMID(pm.MessageID)
|
||||
f.Put(key, pm)
|
||||
|
||||
if !exists(storedir + "/o.120.msg") {
|
||||
t.Fatalf("message not in store")
|
||||
}
|
||||
|
||||
exp := []byte{
|
||||
/* msg type */
|
||||
0x32, // qos 1
|
||||
|
||||
/* remlen */
|
||||
0x0d,
|
||||
|
||||
/* topic, msg id in varheader */
|
||||
0x00, // length of topic
|
||||
0x06,
|
||||
0x2F, // /
|
||||
0x61, // a
|
||||
0x2F, // /
|
||||
0x62, // b
|
||||
0x2F, // /
|
||||
0x63, // c
|
||||
|
||||
/* msg id (is always 2 bytes) */
|
||||
0x00,
|
||||
0x78,
|
||||
|
||||
/*payload */
|
||||
0xBE,
|
||||
0xEF,
|
||||
0xED,
|
||||
}
|
||||
|
||||
m := f.Get(key)
|
||||
|
||||
if m == nil {
|
||||
t.Fatalf("message not retreived from store")
|
||||
}
|
||||
|
||||
var msg bytes.Buffer
|
||||
m.Write(&msg)
|
||||
if !bytes.Equal(exp, msg.Bytes()) {
|
||||
t.Fatal("message from store not same as what went in", msg.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func Test_FileStore_All(t *testing.T) {
|
||||
storedir := "/tmp/TestStore/_all"
|
||||
f := NewFileStore(storedir)
|
||||
f.Open()
|
||||
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pm.Qos = 2
|
||||
pm.TopicName = "/t/r/v"
|
||||
pm.Payload = []byte{0x01, 0x02}
|
||||
pm.MessageID = 121
|
||||
|
||||
key := outboundKeyFromMID(pm.MessageID)
|
||||
f.Put(key, pm)
|
||||
|
||||
keys := f.All()
|
||||
if len(keys) != 1 {
|
||||
t.Fatalf("FileStore.All does not have the messages")
|
||||
}
|
||||
|
||||
if keys[0] != "o.121" {
|
||||
t.Fatalf("FileStore.All has wrong key")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_FileStore_Del(t *testing.T) {
|
||||
storedir := "/tmp/TestStore/_del"
|
||||
f := NewFileStore(storedir)
|
||||
f.Open()
|
||||
|
||||
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pm.Qos = 1
|
||||
pm.TopicName = "a/b/c"
|
||||
pm.Payload = []byte{0xBE, 0xEF, 0xED}
|
||||
pm.MessageID = 17
|
||||
|
||||
key := inboundKeyFromMID(pm.MessageID)
|
||||
f.Put(key, pm)
|
||||
|
||||
if !exists(storedir + "/i.17.msg") {
|
||||
t.Fatalf("message not in store")
|
||||
}
|
||||
|
||||
f.Del(key)
|
||||
|
||||
if exists(storedir + "/i.17.msg") {
|
||||
t.Fatalf("message still exists after deletion")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_FileStore_Reset(t *testing.T) {
|
||||
storedir := "/tmp/TestStore/_reset"
|
||||
f := NewFileStore(storedir)
|
||||
f.Open()
|
||||
|
||||
pm1 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pm1.Qos = 1
|
||||
pm1.TopicName = "/q/w/e"
|
||||
pm1.Payload = []byte{0xBB}
|
||||
pm1.MessageID = 71
|
||||
key1 := inboundKeyFromMID(pm1.MessageID)
|
||||
f.Put(key1, pm1)
|
||||
|
||||
pm2 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pm2.Qos = 1
|
||||
pm2.TopicName = "/q/w/e"
|
||||
pm2.Payload = []byte{0xBB}
|
||||
pm2.MessageID = 72
|
||||
key2 := inboundKeyFromMID(pm2.MessageID)
|
||||
f.Put(key2, pm2)
|
||||
|
||||
pm3 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pm3.Qos = 1
|
||||
pm3.TopicName = "/q/w/e"
|
||||
pm3.Payload = []byte{0xBB}
|
||||
pm3.MessageID = 73
|
||||
key3 := inboundKeyFromMID(pm3.MessageID)
|
||||
f.Put(key3, pm3)
|
||||
|
||||
pm4 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pm4.Qos = 1
|
||||
pm4.TopicName = "/q/w/e"
|
||||
pm4.Payload = []byte{0xBB}
|
||||
pm4.MessageID = 74
|
||||
key4 := inboundKeyFromMID(pm4.MessageID)
|
||||
f.Put(key4, pm4)
|
||||
|
||||
pm5 := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pm5.Qos = 1
|
||||
pm5.TopicName = "/q/w/e"
|
||||
pm5.Payload = []byte{0xBB}
|
||||
pm5.MessageID = 75
|
||||
key5 := inboundKeyFromMID(pm5.MessageID)
|
||||
f.Put(key5, pm5)
|
||||
|
||||
if !exists(storedir + "/i.71.msg") {
|
||||
t.Fatalf("message not in store")
|
||||
}
|
||||
|
||||
if !exists(storedir + "/i.72.msg") {
|
||||
t.Fatalf("message not in store")
|
||||
}
|
||||
|
||||
if !exists(storedir + "/i.73.msg") {
|
||||
t.Fatalf("message not in store")
|
||||
}
|
||||
|
||||
if !exists(storedir + "/i.74.msg") {
|
||||
t.Fatalf("message not in store")
|
||||
}
|
||||
|
||||
if !exists(storedir + "/i.75.msg") {
|
||||
t.Fatalf("message not in store")
|
||||
}
|
||||
|
||||
f.Reset()
|
||||
|
||||
if exists(storedir + "/i.71.msg") {
|
||||
t.Fatalf("message still exists after reset")
|
||||
}
|
||||
|
||||
if exists(storedir + "/i.72.msg") {
|
||||
t.Fatalf("message still exists after reset")
|
||||
}
|
||||
|
||||
if exists(storedir + "/i.73.msg") {
|
||||
t.Fatalf("message still exists after reset")
|
||||
}
|
||||
|
||||
if exists(storedir + "/i.74.msg") {
|
||||
t.Fatalf("message still exists after reset")
|
||||
}
|
||||
|
||||
if exists(storedir + "/i.75.msg") {
|
||||
t.Fatalf("message still exists after reset")
|
||||
}
|
||||
}
|
||||
|
||||
/*******************
|
||||
*** MemoryStore ***
|
||||
*******************/
|
||||
|
||||
func Test_NewMemoryStore(t *testing.T) {
|
||||
m := NewMemoryStore()
|
||||
if m == nil {
|
||||
t.Fatalf("MemoryStore could not be created")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_MemoryStore_Open(t *testing.T) {
|
||||
m := NewMemoryStore()
|
||||
m.Open()
|
||||
if !m.opened {
|
||||
t.Fatalf("MemoryStore was not set open")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_MemoryStore_Close(t *testing.T) {
|
||||
m := NewMemoryStore()
|
||||
m.Open()
|
||||
if !m.opened {
|
||||
t.Fatalf("MemoryStore was not set open")
|
||||
}
|
||||
|
||||
m.Close()
|
||||
if m.opened {
|
||||
t.Fatalf("MemoryStore was still open after unopen")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_MemoryStore_Reset(t *testing.T) {
|
||||
m := NewMemoryStore()
|
||||
m.Open()
|
||||
|
||||
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pm.Qos = 2
|
||||
pm.TopicName = "/f/r/s"
|
||||
pm.Payload = []byte{0xAB}
|
||||
pm.MessageID = 81
|
||||
|
||||
key := outboundKeyFromMID(pm.MessageID)
|
||||
m.Put(key, pm)
|
||||
|
||||
if len(m.messages) != 1 {
|
||||
t.Fatalf("message not in memstore")
|
||||
}
|
||||
|
||||
m.Reset()
|
||||
|
||||
if len(m.messages) != 0 {
|
||||
t.Fatalf("reset did not clear memstore")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_MemoryStore_write(t *testing.T) {
|
||||
m := NewMemoryStore()
|
||||
m.Open()
|
||||
|
||||
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pm.Qos = 1
|
||||
pm.TopicName = "/a/b/c"
|
||||
pm.Payload = []byte{0xBE, 0xEF, 0xED}
|
||||
pm.MessageID = 91
|
||||
key := inboundKeyFromMID(pm.MessageID)
|
||||
m.Put(key, pm)
|
||||
|
||||
if len(m.messages) != 1 {
|
||||
t.Fatalf("message not in store")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_MemoryStore_Get(t *testing.T) {
|
||||
m := NewMemoryStore()
|
||||
m.Open()
|
||||
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pm.Qos = 1
|
||||
pm.TopicName = "/a/b/c"
|
||||
pm.Payload = []byte{0xBE, 0xEF, 0xED}
|
||||
pm.MessageID = 120
|
||||
|
||||
key := outboundKeyFromMID(pm.MessageID)
|
||||
m.Put(key, pm)
|
||||
|
||||
if len(m.messages) != 1 {
|
||||
t.Fatalf("message not in store")
|
||||
}
|
||||
|
||||
exp := []byte{
|
||||
/* msg type */
|
||||
0x32, // qos 1
|
||||
|
||||
/* remlen */
|
||||
0x0d,
|
||||
|
||||
/* topic, msg id in varheader */
|
||||
0x00, // length of topic
|
||||
0x06,
|
||||
0x2F, // /
|
||||
0x61, // a
|
||||
0x2F, // /
|
||||
0x62, // b
|
||||
0x2F, // /
|
||||
0x63, // c
|
||||
|
||||
/* msg id (is always 2 bytes) */
|
||||
0x00,
|
||||
0x78,
|
||||
|
||||
/*payload */
|
||||
0xBE,
|
||||
0xEF,
|
||||
0xED,
|
||||
}
|
||||
|
||||
msg := m.Get(key)
|
||||
|
||||
if msg == nil {
|
||||
t.Fatalf("message not retreived from store")
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
msg.Write(&buf)
|
||||
if !bytes.Equal(exp, buf.Bytes()) {
|
||||
t.Fatalf("message from store not same as what went in")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_MemoryStore_Del(t *testing.T) {
|
||||
m := NewMemoryStore()
|
||||
m.Open()
|
||||
|
||||
pm := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pm.Qos = 1
|
||||
pm.TopicName = "/a/b/c"
|
||||
pm.Payload = []byte{0xBE, 0xEF, 0xED}
|
||||
pm.MessageID = 17
|
||||
|
||||
key := outboundKeyFromMID(pm.MessageID)
|
||||
|
||||
m.Put(key, pm)
|
||||
|
||||
if len(m.messages) != 1 {
|
||||
t.Fatalf("message not in store")
|
||||
}
|
||||
|
||||
m.Del(key)
|
||||
|
||||
if len(m.messages) != 1 {
|
||||
t.Fatalf("message still exists after deletion")
|
||||
}
|
||||
}
@ -1,26 +0,0 @@
/*
 * Copyright (c) 2013 IBM Corp.
 *
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *    Seth Hoenig
 *    Allan Stockdill-Mander
 *    Mike Robertson
 */

package mqtt

// Use setup_IMA.sh for IBM's MessageSight
// Use fvt/rsmb.cfg for IBM's Really Small Message Broker
// Use fvt/mosquitto.cfg for the open source Mosquitto project

// Set these values to the URI of your MQTT Broker before running go-test
const (
	FVTAddr = "iot.eclipse.org"
	FVTTCP  = "tcp://" + FVTAddr + ":1883"
	FVTSSL  = "ssl://" + FVTAddr + ":8883"
)
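A test helper that wants the plain TCP listener simply feeds FVTTCP into the client options. A minimal in-package sketch, assuming the NewClientOptions/NewClient constructors from the suppressed options.go and client.go diffs:

func newFVTClient() *Client {
	// Hypothetical helper; AddBroker and NewClient are assumptions here.
	return NewClient(NewClientOptions().AddBroker(FVTTCP))
}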
|
|
@ -1,119 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// MemoryStore implements the store interface to provide a "persistence"
|
||||
// mechanism wholly stored in memory. This is only useful for
|
||||
// as long as the client instance exists.
|
||||
type MemoryStore struct {
|
||||
sync.RWMutex
|
||||
messages map[string]packets.ControlPacket
|
||||
opened bool
|
||||
}
|
||||
|
||||
// NewMemoryStore returns a pointer to a new instance of
|
||||
// MemoryStore, the instance is not initialized and ready to
|
||||
// use until Open() has been called on it.
|
||||
func NewMemoryStore() *MemoryStore {
|
||||
store := &MemoryStore{
|
||||
messages: make(map[string]packets.ControlPacket),
|
||||
opened: false,
|
||||
}
|
||||
return store
|
||||
}
|
||||
|
||||
// Open initializes a MemoryStore instance.
|
||||
func (store *MemoryStore) Open() {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
store.opened = true
|
||||
DEBUG.Println(STR, "memorystore initialized")
|
||||
}
|
||||
|
||||
// Put takes a key and a pointer to a Message and stores the
|
||||
// message.
|
||||
func (store *MemoryStore) Put(key string, message packets.ControlPacket) {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
chkcond(store.opened)
|
||||
store.messages[key] = message
|
||||
}
|
||||
|
||||
// Get takes a key and looks in the store for a matching Message
|
||||
// returning either the Message pointer or nil.
|
||||
func (store *MemoryStore) Get(key string) packets.ControlPacket {
|
||||
store.RLock()
|
||||
defer store.RUnlock()
|
||||
chkcond(store.opened)
|
||||
mid := mIDFromKey(key)
|
||||
m := store.messages[key]
|
||||
if m == nil {
|
||||
CRITICAL.Println(STR, "memorystore get: message", mid, "not found")
|
||||
} else {
|
||||
DEBUG.Println(STR, "memorystore get: message", mid, "found")
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// All returns a slice of strings containing all the keys currently
|
||||
// in the MemoryStore.
|
||||
func (store *MemoryStore) All() []string {
|
||||
store.RLock()
|
||||
defer store.RUnlock()
|
||||
chkcond(store.opened)
|
||||
keys := []string{}
|
||||
for k := range store.messages {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// Del takes a key, searches the MemoryStore and if the key is found
|
||||
// deletes the Message pointer associated with it.
|
||||
func (store *MemoryStore) Del(key string) {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
mid := mIDFromKey(key)
|
||||
m := store.messages[key]
|
||||
if m == nil {
|
||||
WARN.Println(STR, "memorystore del: message", mid, "not found")
|
||||
} else {
|
||||
store.messages[key] = nil
|
||||
DEBUG.Println(STR, "memorystore del: message", mid, "was deleted")
|
||||
}
|
||||
}
|
||||
|
||||
// Close will disallow modifications to the state of the store.
|
||||
func (store *MemoryStore) Close() {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
chkcond(store.opened)
|
||||
store.opened = false
|
||||
DEBUG.Println(STR, "memorystore closed")
|
||||
}
|
||||
|
||||
// Reset eliminates all persisted message data in the store.
|
||||
func (store *MemoryStore) Reset() {
|
||||
store.Lock()
|
||||
defer store.Unlock()
|
||||
chkcond(store.opened)
|
||||
store.messages = make(map[string]packets.ControlPacket)
|
||||
WARN.Println(STR, "memorystore wiped")
|
||||
}
|
|
@ -1,104 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
|
||||
)
|
||||
|
||||
// Message defines the externals that a message implementation must support
|
||||
// these are received messages that are passed to the callbacks, not internal
|
||||
// messages
|
||||
type Message interface {
|
||||
Duplicate() bool
|
||||
Qos() byte
|
||||
Retained() bool
|
||||
Topic() string
|
||||
MessageID() uint16
|
||||
Payload() []byte
|
||||
}
|
||||
|
||||
type message struct {
|
||||
duplicate bool
|
||||
qos byte
|
||||
retained bool
|
||||
topic string
|
||||
messageID uint16
|
||||
payload []byte
|
||||
}
|
||||
|
||||
func (m *message) Duplicate() bool {
|
||||
return m.duplicate
|
||||
}
|
||||
|
||||
func (m *message) Qos() byte {
|
||||
return m.qos
|
||||
}
|
||||
|
||||
func (m *message) Retained() bool {
|
||||
return m.retained
|
||||
}
|
||||
|
||||
func (m *message) Topic() string {
|
||||
return m.topic
|
||||
}
|
||||
|
||||
func (m *message) MessageID() uint16 {
|
||||
return m.messageID
|
||||
}
|
||||
|
||||
func (m *message) Payload() []byte {
|
||||
return m.payload
|
||||
}
|
||||
|
||||
func messageFromPublish(p *packets.PublishPacket) Message {
|
||||
return &message{
|
||||
duplicate: p.Dup,
|
||||
qos: p.Qos,
|
||||
retained: p.Retain,
|
||||
topic: p.TopicName,
|
||||
messageID: p.MessageID,
|
||||
payload: p.Payload,
|
||||
}
|
||||
}
|
||||
|
||||
func newConnectMsgFromOptions(options *ClientOptions) *packets.ConnectPacket {
|
||||
m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
|
||||
|
||||
m.CleanSession = options.CleanSession
|
||||
m.WillFlag = options.WillEnabled
|
||||
m.WillRetain = options.WillRetained
|
||||
m.ClientIdentifier = options.ClientID
|
||||
|
||||
if options.WillEnabled {
|
||||
m.WillQos = options.WillQos
|
||||
m.WillTopic = options.WillTopic
|
||||
m.WillMessage = options.WillPayload
|
||||
}
|
||||
|
||||
if options.Username != "" {
|
||||
m.UsernameFlag = true
|
||||
m.Username = options.Username
|
||||
//mustn't have password without user as well
|
||||
if options.Password != "" {
|
||||
m.PasswordFlag = true
|
||||
m.Password = []byte(options.Password)
|
||||
}
|
||||
}
|
||||
|
||||
m.KeepaliveTimer = uint16(options.KeepAlive)
|
||||
|
||||
return m
|
||||
}
@ -1,61 +0,0 @@
/*
 * Copyright (c) 2013 IBM Corp.
 *
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *    Seth Hoenig
 *    Allan Stockdill-Mander
 *    Mike Robertson
 */

package mqtt

import (
	"sync"
)

// MId is 16 bit message id as specified by the MQTT spec.
// In general, these values should not be depended upon by
// the client application.
type MId uint16

type messageIds struct {
	sync.RWMutex
	index map[uint16]Token
}

const (
	midMin uint16 = 1
	midMax uint16 = 65535
)

func (mids *messageIds) freeID(id uint16) {
	mids.Lock()
	defer mids.Unlock()
	delete(mids.index, id)
}

func (mids *messageIds) getID(t Token) uint16 {
	mids.Lock()
	defer mids.Unlock()
	for i := midMin; i < midMax; i++ {
		if _, ok := mids.index[i]; !ok {
			mids.index[i] = t
			return i
		}
	}
	return 0
}

func (mids *messageIds) getToken(id uint16) Token {
	mids.RLock()
	defer mids.RUnlock()
	if token, ok := mids.index[id]; ok {
		return token
	}
	return nil
}
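Taken together, getID, getToken and freeID give each in-flight packet a unique 16-bit id for its lifetime: getID hands out the lowest free id and records the token, getToken looks it up when the matching ack arrives, and freeID releases it. A minimal in-package sketch of that lifecycle (the helper name is made up for illustration):

func exampleMessageIDLifecycle(t Token) {
	mids := &messageIds{index: make(map[uint16]Token)}
	id := mids.getID(t)   // lowest free id, 1 on a fresh index; 0 means all ids are in use
	_ = mids.getToken(id) // returns t until the id is released
	mids.freeID(id)       // id becomes available for reuse by the next getID
}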
Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/net.go (275 lines, generated, vendored)
@ -1,275 +0,0 @@
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
|
||||
"golang.org/x/net/websocket"
|
||||
"net"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
func openConnection(uri *url.URL, tlsc *tls.Config, timeout time.Duration) (net.Conn, error) {
|
||||
switch uri.Scheme {
|
||||
case "ws":
|
||||
conn, err := websocket.Dial(uri.String(), "mqtt", "ws://localhost")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn.PayloadType = websocket.BinaryFrame
|
||||
return conn, err
|
||||
case "wss":
|
||||
config, _ := websocket.NewConfig(uri.String(), "ws://localhost")
|
||||
config.Protocol = []string{"mqtt"}
|
||||
config.TlsConfig = tlsc
|
||||
conn, err := websocket.DialConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn.PayloadType = websocket.BinaryFrame
|
||||
return conn, err
|
||||
case "tcp":
|
||||
conn, err := net.DialTimeout("tcp", uri.Host, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
case "ssl":
|
||||
fallthrough
|
||||
case "tls":
|
||||
fallthrough
|
||||
case "tcps":
|
||||
conn, err := tls.DialWithDialer(&net.Dialer{Timeout: timeout}, "tcp", uri.Host, tlsc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
return nil, errors.New("Unknown protocol")
|
||||
}
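openConnection above is the single place the broker URL scheme is interpreted: "ws" and "wss" go over websockets, "tcp" over a plain dialer with a timeout, and "ssl"/"tls"/"tcps" over tls.DialWithDialer; any other scheme returns the "Unknown protocol" error. A minimal in-package sketch of calling it (the helper name and broker URL are illustrative):

func dialBroker(raw string, tlsc *tls.Config) (net.Conn, error) {
	uri, err := url.Parse(raw) // e.g. "ssl://iot.eclipse.org:8883"
	if err != nil {
		return nil, err
	}
	return openConnection(uri, tlsc, 30*time.Second)
}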
|
||||
|
||||
// actually read incoming messages off the wire
|
||||
// send Message object into ibound channel
|
||||
func incoming(c *Client) {
|
||||
defer c.workers.Done()
|
||||
var err error
|
||||
var cp packets.ControlPacket
|
||||
|
||||
DEBUG.Println(NET, "incoming started")
|
||||
|
||||
for {
|
||||
if cp, err = packets.ReadPacket(c.conn); err != nil {
|
||||
break
|
||||
}
|
||||
DEBUG.Println(NET, "Received Message")
|
||||
c.ibound <- cp
|
||||
}
|
||||
// We received an error on read.
|
||||
// If disconnect is in progress, swallow error and return
|
||||
select {
|
||||
case <-c.stop:
|
||||
DEBUG.Println(NET, "incoming stopped")
|
||||
return
|
||||
// Not trying to disconnect, send the error to the errors channel
|
||||
default:
|
||||
ERROR.Println(NET, "incoming stopped with error")
|
||||
c.errors <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// receive a Message object on obound, and then
|
||||
// actually send outgoing message to the wire
|
||||
func outgoing(c *Client) {
|
||||
defer c.workers.Done()
|
||||
DEBUG.Println(NET, "outgoing started")
|
||||
|
||||
for {
|
||||
DEBUG.Println(NET, "outgoing waiting for an outbound message")
|
||||
select {
|
||||
case <-c.stop:
|
||||
DEBUG.Println(NET, "outgoing stopped")
|
||||
return
|
||||
case pub := <-c.obound:
|
||||
msg := pub.p.(*packets.PublishPacket)
|
||||
if msg.Qos != 0 && msg.MessageID == 0 {
|
||||
msg.MessageID = c.getID(pub.t)
|
||||
pub.t.(*PublishToken).messageID = msg.MessageID
|
||||
}
|
||||
//persist_obound(c.persist, msg)
|
||||
|
||||
if c.options.WriteTimeout > 0 {
|
||||
c.conn.SetWriteDeadline(time.Now().Add(c.options.WriteTimeout))
|
||||
}
|
||||
|
||||
if err := msg.Write(c.conn); err != nil {
|
||||
ERROR.Println(NET, "outgoing stopped with error")
|
||||
c.errors <- err
|
||||
return
|
||||
}
|
||||
|
||||
if c.options.WriteTimeout > 0 {
|
||||
// If we successfully wrote, we don't want the timeout to happen during an idle period
|
||||
// so we reset it to infinite.
|
||||
c.conn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
|
||||
if msg.Qos == 0 {
|
||||
pub.t.flowComplete()
|
||||
}
|
||||
|
||||
c.lastContact.update()
|
||||
DEBUG.Println(NET, "obound wrote msg, id:", msg.MessageID)
|
||||
case msg := <-c.oboundP:
|
||||
switch msg.p.(type) {
|
||||
case *packets.SubscribePacket:
|
||||
msg.p.(*packets.SubscribePacket).MessageID = c.getID(msg.t)
|
||||
case *packets.UnsubscribePacket:
|
||||
msg.p.(*packets.UnsubscribePacket).MessageID = c.getID(msg.t)
|
||||
}
|
||||
DEBUG.Println(NET, "obound priority msg to write, type", reflect.TypeOf(msg.p))
|
||||
if err := msg.p.Write(c.conn); err != nil {
|
||||
ERROR.Println(NET, "outgoing stopped with error")
|
||||
c.errors <- err
|
||||
return
|
||||
}
|
||||
c.lastContact.update()
|
||||
switch msg.p.(type) {
|
||||
case *packets.DisconnectPacket:
|
||||
msg.t.(*DisconnectToken).flowComplete()
|
||||
DEBUG.Println(NET, "outbound wrote disconnect, stopping")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// receive Message objects on ibound
|
||||
// store messages if necessary
|
||||
// send replies on obound
|
||||
// delete messages from store if necessary
|
||||
func alllogic(c *Client) {
|
||||
|
||||
DEBUG.Println(NET, "logic started")
|
||||
|
||||
for {
|
||||
DEBUG.Println(NET, "logic waiting for msg on ibound")
|
||||
|
||||
select {
|
||||
case msg := <-c.ibound:
|
||||
DEBUG.Println(NET, "logic got msg on ibound")
|
||||
//persist_ibound(c.persist, msg)
|
||||
switch msg.(type) {
|
||||
case *packets.PingrespPacket:
|
||||
DEBUG.Println(NET, "received pingresp")
|
||||
c.pingOutstanding = false
|
||||
case *packets.SubackPacket:
|
||||
sa := msg.(*packets.SubackPacket)
|
||||
DEBUG.Println(NET, "received suback, id:", sa.MessageID)
|
||||
token := c.getToken(sa.MessageID).(*SubscribeToken)
|
||||
DEBUG.Println(NET, "granted qoss", sa.GrantedQoss)
|
||||
for i, qos := range sa.GrantedQoss {
|
||||
token.subResult[token.subs[i]] = qos
|
||||
}
|
||||
token.flowComplete()
|
||||
go c.freeID(sa.MessageID)
|
||||
case *packets.UnsubackPacket:
|
||||
ua := msg.(*packets.UnsubackPacket)
|
||||
DEBUG.Println(NET, "received unsuback, id:", ua.MessageID)
|
||||
token := c.getToken(ua.MessageID).(*UnsubscribeToken)
|
||||
token.flowComplete()
|
||||
go c.freeID(ua.MessageID)
|
||||
case *packets.PublishPacket:
|
||||
pp := msg.(*packets.PublishPacket)
|
||||
DEBUG.Println(NET, "received publish, msgId:", pp.MessageID)
|
||||
DEBUG.Println(NET, "putting msg on onPubChan")
|
||||
switch pp.Qos {
|
||||
case 2:
|
||||
c.incomingPubChan <- pp
|
||||
DEBUG.Println(NET, "done putting msg on incomingPubChan")
|
||||
pr := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
|
||||
pr.MessageID = pp.MessageID
|
||||
DEBUG.Println(NET, "putting pubrec msg on obound")
|
||||
c.oboundP <- &PacketAndToken{p: pr, t: nil}
|
||||
DEBUG.Println(NET, "done putting pubrec msg on obound")
|
||||
case 1:
|
||||
c.incomingPubChan <- pp
|
||||
DEBUG.Println(NET, "done putting msg on incomingPubChan")
|
||||
pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
|
||||
pa.MessageID = pp.MessageID
|
||||
DEBUG.Println(NET, "putting puback msg on obound")
|
||||
c.oboundP <- &PacketAndToken{p: pa, t: nil}
|
||||
DEBUG.Println(NET, "done putting puback msg on obound")
|
||||
case 0:
|
||||
select {
|
||||
case c.incomingPubChan <- pp:
|
||||
DEBUG.Println(NET, "done putting msg on incomingPubChan")
|
||||
case err, ok := <-c.errors:
|
||||
DEBUG.Println(NET, "error while putting msg on pubChanZero")
|
||||
// We are unblocked, but need to put the error back on so the outer
|
||||
// select can handle it appropriately.
|
||||
if ok {
|
||||
go func(errVal error, errChan chan error) {
|
||||
errChan <- errVal
|
||||
}(err, c.errors)
|
||||
}
|
||||
}
|
||||
}
|
||||
case *packets.PubackPacket:
|
||||
pa := msg.(*packets.PubackPacket)
|
||||
DEBUG.Println(NET, "received puback, id:", pa.MessageID)
|
||||
// c.receipts.get(msg.MsgId()) <- Receipt{}
|
||||
// c.receipts.end(msg.MsgId())
|
||||
c.getToken(pa.MessageID).flowComplete()
|
||||
c.freeID(pa.MessageID)
|
||||
case *packets.PubrecPacket:
|
||||
prec := msg.(*packets.PubrecPacket)
|
||||
DEBUG.Println(NET, "received pubrec, id:", prec.MessageID)
|
||||
prel := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
|
||||
prel.MessageID = prec.MessageID
|
||||
select {
|
||||
case c.oboundP <- &PacketAndToken{p: prel, t: nil}:
|
||||
case <-time.After(time.Second):
|
||||
}
|
||||
case *packets.PubrelPacket:
|
||||
pr := msg.(*packets.PubrelPacket)
|
||||
DEBUG.Println(NET, "received pubrel, id:", pr.MessageID)
|
||||
pc := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket)
|
||||
pc.MessageID = pr.MessageID
|
||||
select {
|
||||
case c.oboundP <- &PacketAndToken{p: pc, t: nil}:
|
||||
case <-time.After(time.Second):
|
||||
}
|
||||
case *packets.PubcompPacket:
|
||||
pc := msg.(*packets.PubcompPacket)
|
||||
DEBUG.Println(NET, "received pubcomp, id:", pc.MessageID)
|
||||
c.getToken(pc.MessageID).flowComplete()
|
||||
c.freeID(pc.MessageID)
|
||||
}
|
||||
case <-c.stop:
|
||||
WARN.Println(NET, "logic stopped")
|
||||
return
|
||||
case err := <-c.errors:
|
||||
ERROR.Println(NET, "logic got error")
|
||||
c.internalConnLost(err)
|
||||
return
|
||||
}
|
||||
c.lastContact.update()
|
||||
}
|
||||
}
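Editor's sketch (illustrative only, assuming the surrounding file's import of the packets package): the inbound PUBLISH handling above chooses its acknowledgement by QoS — nothing for QoS 0, a PUBACK for QoS 1, and a PUBREC for QoS 2, with PUBREL/PUBCOMP completing the handshake.

// ackFor returns the acknowledgement packet that alllogic queues on oboundP
// for an inbound PUBLISH, or nil when QoS 0 needs no acknowledgement.
func ackFor(pp *packets.PublishPacket) packets.ControlPacket {
	switch pp.Qos {
	case 1:
		pa := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
		pa.MessageID = pp.MessageID
		return pa
	case 2:
		pr := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
		pr.MessageID = pp.MessageID
		return pr
	}
	return nil
}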
@ -1,17 +0,0 @@
package mqtt

import (
	"errors"
	"fmt"
	"strconv"
	"testing"
)

func Test_openConnection(t *testing.T) {
	_, err := strconv.Atoi("")
	e := fmt.Errorf(" : %s", err)
	t.Errorf("%#v", e)

	e1 := errors.New("hogehoge %s")
	t.Errorf("%#v", e1)
}
@ -1,108 +0,0 @@
<?xml version="1.0" encoding="ISO-8859-1" ?>
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
|
||||
<html xmlns="http://www.w3.org/1999/xhtml">
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1" />
|
||||
<title>Eclipse Foundation Software User Agreement</title>
|
||||
</head>
|
||||
|
||||
<body lang="EN-US">
|
||||
<h2>Eclipse Foundation Software User Agreement</h2>
|
||||
<p>February 1, 2011</p>
|
||||
|
||||
<h3>Usage Of Content</h3>
|
||||
|
||||
<p>THE ECLIPSE FOUNDATION MAKES AVAILABLE SOFTWARE, DOCUMENTATION, INFORMATION AND/OR OTHER MATERIALS FOR OPEN SOURCE PROJECTS
|
||||
(COLLECTIVELY "CONTENT"). USE OF THE CONTENT IS GOVERNED BY THE TERMS AND CONDITIONS OF THIS AGREEMENT AND/OR THE TERMS AND
|
||||
CONDITIONS OF LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW. BY USING THE CONTENT, YOU AGREE THAT YOUR USE
|
||||
OF THE CONTENT IS GOVERNED BY THIS AGREEMENT AND/OR THE TERMS AND CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR
|
||||
NOTICES INDICATED OR REFERENCED BELOW. IF YOU DO NOT AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT AND THE TERMS AND
|
||||
CONDITIONS OF ANY APPLICABLE LICENSE AGREEMENTS OR NOTICES INDICATED OR REFERENCED BELOW, THEN YOU MAY NOT USE THE CONTENT.</p>
|
||||
|
||||
<h3>Applicable Licenses</h3>
|
||||
|
||||
<p>Unless otherwise indicated, all Content made available by the Eclipse Foundation is provided to you under the terms and conditions of the Eclipse Public License Version 1.0
|
||||
("EPL"). A copy of the EPL is provided with this Content and is also available at <a href="http://www.eclipse.org/legal/epl-v10.html">http://www.eclipse.org/legal/epl-v10.html</a>.
|
||||
For purposes of the EPL, "Program" will mean the Content.</p>
|
||||
|
||||
<p>Content includes, but is not limited to, source code, object code, documentation and other files maintained in the Eclipse Foundation source code
|
||||
repository ("Repository") in software modules ("Modules") and made available as downloadable archives ("Downloads").</p>
|
||||
|
||||
<ul>
|
||||
<li>Content may be structured and packaged into modules to facilitate delivering, extending, and upgrading the Content. Typical modules may include plug-ins ("Plug-ins"), plug-in fragments ("Fragments"), and features ("Features").</li>
|
||||
<li>Each Plug-in or Fragment may be packaged as a sub-directory or JAR (Java™ ARchive) in a directory named "plugins".</li>
|
||||
<li>A Feature is a bundle of one or more Plug-ins and/or Fragments and associated material. Each Feature may be packaged as a sub-directory in a directory named "features". Within a Feature, files named "feature.xml" may contain a list of the names and version numbers of the Plug-ins
|
||||
and/or Fragments associated with that Feature.</li>
|
||||
<li>Features may also include other Features ("Included Features"). Within a Feature, files named "feature.xml" may contain a list of the names and version numbers of Included Features.</li>
|
||||
</ul>
|
||||
|
||||
<p>The terms and conditions governing Plug-ins and Fragments should be contained in files named "about.html" ("Abouts"). The terms and conditions governing Features and
|
||||
Included Features should be contained in files named "license.html" ("Feature Licenses"). Abouts and Feature Licenses may be located in any directory of a Download or Module
|
||||
including, but not limited to the following locations:</p>
|
||||
|
||||
<ul>
|
||||
<li>The top-level (root) directory</li>
|
||||
<li>Plug-in and Fragment directories</li>
|
||||
<li>Inside Plug-ins and Fragments packaged as JARs</li>
|
||||
<li>Sub-directories of the directory named "src" of certain Plug-ins</li>
|
||||
<li>Feature directories</li>
|
||||
</ul>
|
||||
|
||||
<p>Note: if a Feature made available by the Eclipse Foundation is installed using the Provisioning Technology (as defined below), you must agree to a license ("Feature Update License") during the
|
||||
installation process. If the Feature contains Included Features, the Feature Update License should either provide you with the terms and conditions governing the Included Features or
|
||||
inform you where you can locate them. Feature Update Licenses may be found in the "license" property of files named "feature.properties" found within a Feature.
|
||||
Such Abouts, Feature Licenses, and Feature Update Licenses contain the terms and conditions (or references to such terms and conditions) that govern your use of the associated Content in
|
||||
that directory.</p>
|
||||
|
||||
<p>THE ABOUTS, FEATURE LICENSES, AND FEATURE UPDATE LICENSES MAY REFER TO THE EPL OR OTHER LICENSE AGREEMENTS, NOTICES OR TERMS AND CONDITIONS. SOME OF THESE
|
||||
OTHER LICENSE AGREEMENTS MAY INCLUDE (BUT ARE NOT LIMITED TO):</p>
|
||||
|
||||
<ul>
|
||||
<li>Eclipse Distribution License Version 1.0 (available at <a href="http://www.eclipse.org/licenses/edl-v10.html">http://www.eclipse.org/licenses/edl-v1.0.html</a>)</li>
|
||||
<li>Common Public License Version 1.0 (available at <a href="http://www.eclipse.org/legal/cpl-v10.html">http://www.eclipse.org/legal/cpl-v10.html</a>)</li>
|
||||
<li>Apache Software License 1.1 (available at <a href="http://www.apache.org/licenses/LICENSE">http://www.apache.org/licenses/LICENSE</a>)</li>
|
||||
<li>Apache Software License 2.0 (available at <a href="http://www.apache.org/licenses/LICENSE-2.0">http://www.apache.org/licenses/LICENSE-2.0</a>)</li>
|
||||
<li>Metro Link Public License 1.00 (available at <a href="http://www.opengroup.org/openmotif/supporters/metrolink/license.html">http://www.opengroup.org/openmotif/supporters/metrolink/license.html</a>)</li>
|
||||
<li>Mozilla Public License Version 1.1 (available at <a href="http://www.mozilla.org/MPL/MPL-1.1.html">http://www.mozilla.org/MPL/MPL-1.1.html</a>)</li>
|
||||
</ul>
|
||||
|
||||
<p>IT IS YOUR OBLIGATION TO READ AND ACCEPT ALL SUCH TERMS AND CONDITIONS PRIOR TO USE OF THE CONTENT. If no About, Feature License, or Feature Update License is provided, please
|
||||
contact the Eclipse Foundation to determine what terms and conditions govern that particular Content.</p>
|
||||
|
||||
|
||||
<h3>Use of Provisioning Technology</h3>
|
||||
|
||||
<p>The Eclipse Foundation makes available provisioning software, examples of which include, but are not limited to, p2 and the Eclipse
|
||||
Update Manager ("Provisioning Technology") for the purpose of allowing users to install software, documentation, information and/or
|
||||
other materials (collectively "Installable Software"). This capability is provided with the intent of allowing such users to
|
||||
install, extend and update Eclipse-based products. Information about packaging Installable Software is available at <a
|
||||
href="http://eclipse.org/equinox/p2/repository_packaging.html">http://eclipse.org/equinox/p2/repository_packaging.html</a>
|
||||
("Specification").</p>
|
||||
|
||||
<p>You may use Provisioning Technology to allow other parties to install Installable Software. You shall be responsible for enabling the
|
||||
applicable license agreements relating to the Installable Software to be presented to, and accepted by, the users of the Provisioning Technology
|
||||
in accordance with the Specification. By using Provisioning Technology in such a manner and making it available in accordance with the
|
||||
Specification, you further acknowledge your agreement to, and the acquisition of all necessary rights to permit the following:</p>
|
||||
|
||||
<ol>
|
||||
<li>A series of actions may occur ("Provisioning Process") in which a user may execute the Provisioning Technology
|
||||
on a machine ("Target Machine") with the intent of installing, extending or updating the functionality of an Eclipse-based
|
||||
product.</li>
|
||||
<li>During the Provisioning Process, the Provisioning Technology may cause third party Installable Software or a portion thereof to be
|
||||
accessed and copied to the Target Machine.</li>
|
||||
<li>Pursuant to the Specification, you will provide to the user the terms and conditions that govern the use of the Installable
|
||||
Software ("Installable Software Agreement") and such Installable Software Agreement shall be accessed from the Target
|
||||
Machine in accordance with the Specification. Such Installable Software Agreement must inform the user of the terms and conditions that govern
|
||||
the Installable Software and must solicit acceptance by the end user in the manner prescribed in such Installable Software Agreement. Upon such
|
||||
indication of agreement by the user, the provisioning Technology will complete installation of the Installable Software.</li>
|
||||
</ol>
|
||||
|
||||
<h3>Cryptography</h3>
|
||||
|
||||
<p>Content may contain encryption software. The country in which you are currently may have restrictions on the import, possession, and use, and/or re-export to
|
||||
another country, of encryption software. BEFORE using any encryption software, please check the country's laws, regulations and policies concerning the import,
|
||||
possession, or use, and re-export of encryption software, to see if this is permitted.</p>
|
||||
|
||||
<p><small>Java and all Java-based trademarks are trademarks of Oracle Corporation in the United States, other countries, or both.</small></p>
|
||||
</body>
|
||||
</html>
@ -1,27 +0,0 @@
/*
 * Copyright (c) 2013 IBM Corp.
 *
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 *
 * Contributors:
 *    Seth Hoenig
 *    Allan Stockdill-Mander
 *    Mike Robertson
 */

package mqtt

func chkerr(e error) {
	if e != nil {
		panic(e)
	}
}

func chkcond(b bool) {
	if !b {
		panic("oops")
	}
}
@ -1,270 +0,0 @@
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MessageHandler is a callback type which can be set to be
|
||||
// executed upon the arrival of messages published to topics
|
||||
// to which the client is subscribed.
|
||||
type MessageHandler func(*Client, Message)
|
||||
|
||||
// ConnectionLostHandler is a callback type which can be set to be
|
||||
// executed upon an unintended disconnection from the MQTT broker.
|
||||
// Disconnects caused by calling Disconnect or ForceDisconnect will
|
||||
// not cause an OnConnectionLost callback to execute.
|
||||
type ConnectionLostHandler func(*Client, error)
|
||||
|
||||
// OnConnectHandler is a callback that is called when the client
|
||||
// state changes from unconnected/disconnected to connected. Both
|
||||
// at initial connection and on reconnection
|
||||
type OnConnectHandler func(*Client)
|
||||
|
||||
// ClientOptions contains configurable options for a Client.
|
||||
type ClientOptions struct {
|
||||
Servers []*url.URL
|
||||
ClientID string
|
||||
Username string
|
||||
Password string
|
||||
CleanSession bool
|
||||
Order bool
|
||||
WillEnabled bool
|
||||
WillTopic string
|
||||
WillPayload []byte
|
||||
WillQos byte
|
||||
WillRetained bool
|
||||
ProtocolVersion uint
|
||||
protocolVersionExplicit bool
|
||||
TLSConfig tls.Config
|
||||
KeepAlive time.Duration
|
||||
ConnectTimeout time.Duration
|
||||
MaxReconnectInterval time.Duration
|
||||
AutoReconnect bool
|
||||
Store Store
|
||||
DefaultPublishHander MessageHandler
|
||||
OnConnect OnConnectHandler
|
||||
OnConnectionLost ConnectionLostHandler
|
||||
WriteTimeout time.Duration
|
||||
}
|
||||
|
||||
// NewClientOptions will create a new ClientOptions type with some
|
||||
// default values.
|
||||
// Port: 1883
|
||||
// CleanSession: True
|
||||
// Order: True
|
||||
// KeepAlive: 30 (seconds)
|
||||
// ConnectTimeout: 30 (seconds)
|
||||
// MaxReconnectInterval 10 (minutes)
|
||||
// AutoReconnect: True
|
||||
func NewClientOptions() *ClientOptions {
|
||||
o := &ClientOptions{
|
||||
Servers: nil,
|
||||
ClientID: "",
|
||||
Username: "",
|
||||
Password: "",
|
||||
CleanSession: true,
|
||||
Order: true,
|
||||
WillEnabled: false,
|
||||
WillTopic: "",
|
||||
WillPayload: nil,
|
||||
WillQos: 0,
|
||||
WillRetained: false,
|
||||
ProtocolVersion: 0,
|
||||
protocolVersionExplicit: false,
|
||||
TLSConfig: tls.Config{},
|
||||
KeepAlive: 30 * time.Second,
|
||||
ConnectTimeout: 30 * time.Second,
|
||||
MaxReconnectInterval: 10 * time.Minute,
|
||||
AutoReconnect: true,
|
||||
Store: nil,
|
||||
OnConnect: nil,
|
||||
OnConnectionLost: DefaultConnectionLostHandler,
|
||||
WriteTimeout: 0, // 0 represents timeout disabled
|
||||
}
|
||||
return o
|
||||
}
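Editor's note: the setters below all return the receiver, so options are normally chained fluently. A typical configuration looks roughly like this (NewClient, the client constructor, is not part of this diff and is assumed here):

	opts := NewClientOptions().
		AddBroker("tcp://localhost:1883").
		SetClientID("example-client").
		SetCleanSession(true).
		SetKeepAlive(30 * time.Second).
		SetWriteTimeout(5 * time.Second)
	// c := NewClient(opts) // assumed constructor, defined elsewhere in the package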
|
||||
|
||||
// AddBroker adds a broker URI to the list of brokers to be used. The format should be
|
||||
// scheme://host:port
|
||||
// Where "scheme" is one of "tcp", "ssl", or "ws", "host" is the ip-address (or hostname)
|
||||
// and "port" is the port on which the broker is accepting connections.
|
||||
func (o *ClientOptions) AddBroker(server string) *ClientOptions {
|
||||
brokerURI, _ := url.Parse(server)
|
||||
o.Servers = append(o.Servers, brokerURI)
|
||||
return o
|
||||
}
|
||||
|
||||
// SetClientID will set the client id to be used by this client when
|
||||
// connecting to the MQTT broker. According to the MQTT v3.1 specification,
|
||||
// a client id must be no longer than 23 characters.
|
||||
func (o *ClientOptions) SetClientID(id string) *ClientOptions {
|
||||
o.ClientID = id
|
||||
return o
|
||||
}
|
||||
|
||||
// SetUsername will set the username to be used by this client when connecting
|
||||
// to the MQTT broker. Note: without the use of SSL/TLS, this information will
|
||||
// be sent in plaintext across the wire.
|
||||
func (o *ClientOptions) SetUsername(u string) *ClientOptions {
|
||||
o.Username = u
|
||||
return o
|
||||
}
|
||||
|
||||
// SetPassword will set the password to be used by this client when connecting
|
||||
// to the MQTT broker. Note: without the use of SSL/TLS, this information will
|
||||
// be sent in plaintext across the wire.
|
||||
func (o *ClientOptions) SetPassword(p string) *ClientOptions {
|
||||
o.Password = p
|
||||
return o
|
||||
}
|
||||
|
||||
// SetCleanSession will set the "clean session" flag in the connect message
|
||||
// when this client connects to an MQTT broker. By setting this flag, you are
|
||||
// indicating that no messages saved by the broker for this client should be
|
||||
// delivered. Any messages that this client intended to send before it last
// disconnected, but did not, will also not be sent upon connecting to the
// broker.
|
||||
func (o *ClientOptions) SetCleanSession(clean bool) *ClientOptions {
|
||||
o.CleanSession = clean
|
||||
return o
|
||||
}
|
||||
|
||||
// SetOrderMatters will set the message routing to guarantee order within
|
||||
// each QoS level. By default, this value is true. If set to false,
|
||||
// this flag indicates that messages can be delivered asynchronously
|
||||
// from the client to the application and possibly arrive out of order.
|
||||
func (o *ClientOptions) SetOrderMatters(order bool) *ClientOptions {
|
||||
o.Order = order
|
||||
return o
|
||||
}
|
||||
|
||||
// SetTLSConfig will set an SSL/TLS configuration to be used when connecting
|
||||
// to an MQTT broker. Please read the official Go documentation for more
|
||||
// information.
|
||||
func (o *ClientOptions) SetTLSConfig(t *tls.Config) *ClientOptions {
|
||||
o.TLSConfig = *t
|
||||
return o
|
||||
}
|
||||
|
||||
// SetStore will set the implementation of the Store interface
|
||||
// used to provide message persistence in cases where QoS levels
|
||||
// QoS_ONE or QoS_TWO are used. If no store is provided, then the
|
||||
// client will use MemoryStore by default.
|
||||
func (o *ClientOptions) SetStore(s Store) *ClientOptions {
|
||||
o.Store = s
|
||||
return o
|
||||
}
|
||||
|
||||
// SetKeepAlive will set the amount of time (in seconds) that the client
|
||||
// should wait before sending a PING request to the broker. This will
|
||||
// allow the client to know that a connection has not been lost with the
|
||||
// server.
|
||||
func (o *ClientOptions) SetKeepAlive(k time.Duration) *ClientOptions {
|
||||
o.KeepAlive = k
|
||||
return o
|
||||
}
|
||||
|
||||
// SetProtocolVersion sets the MQTT version to be used to connect to the
|
||||
// broker. Legitimate values are currently 3 - MQTT 3.1 or 4 - MQTT 3.1.1
|
||||
func (o *ClientOptions) SetProtocolVersion(pv uint) *ClientOptions {
|
||||
if pv >= 3 && pv <= 4 {
|
||||
o.ProtocolVersion = pv
|
||||
o.protocolVersionExplicit = true
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
// UnsetWill will cause any set will message to be disregarded.
|
||||
func (o *ClientOptions) UnsetWill() *ClientOptions {
|
||||
o.WillEnabled = false
|
||||
return o
|
||||
}
|
||||
|
||||
// SetWill accepts a string will message to be set. When the client connects,
|
||||
// it will give this will message to the broker, which will then publish the
|
||||
// provided payload (the will) to any clients that are subscribed to the provided
|
||||
// topic.
|
||||
func (o *ClientOptions) SetWill(topic string, payload string, qos byte, retained bool) *ClientOptions {
|
||||
o.SetBinaryWill(topic, []byte(payload), qos, retained)
|
||||
return o
|
||||
}
|
||||
|
||||
// SetBinaryWill accepts a []byte will message to be set. When the client connects,
|
||||
// it will give this will message to the broker, which will then publish the
|
||||
// provided payload (the will) to any clients that are subscribed to the provided
|
||||
// topic.
|
||||
func (o *ClientOptions) SetBinaryWill(topic string, payload []byte, qos byte, retained bool) *ClientOptions {
|
||||
o.WillEnabled = true
|
||||
o.WillTopic = topic
|
||||
o.WillPayload = payload
|
||||
o.WillQos = qos
|
||||
o.WillRetained = retained
|
||||
return o
|
||||
}
|
||||
|
||||
// SetDefaultPublishHandler sets the MessageHandler that will be called when a message
|
||||
// is received that does not match any known subscriptions.
|
||||
func (o *ClientOptions) SetDefaultPublishHandler(defaultHandler MessageHandler) *ClientOptions {
|
||||
o.DefaultPublishHander = defaultHandler
|
||||
return o
|
||||
}
|
||||
|
||||
// SetOnConnectHandler sets the function to be called when the client is connected. Both
|
||||
// at initial connection time and upon automatic reconnect.
|
||||
func (o *ClientOptions) SetOnConnectHandler(onConn OnConnectHandler) *ClientOptions {
|
||||
o.OnConnect = onConn
|
||||
return o
|
||||
}
|
||||
|
||||
// SetConnectionLostHandler will set the OnConnectionLost callback to be executed
|
||||
// in the case where the client unexpectedly loses connection with the MQTT broker.
|
||||
func (o *ClientOptions) SetConnectionLostHandler(onLost ConnectionLostHandler) *ClientOptions {
|
||||
o.OnConnectionLost = onLost
|
||||
return o
|
||||
}
|
||||
|
||||
// SetWriteTimeout puts a limit on how long an MQTT publish should block until it unblocks with a
// timeout error. A duration of 0 never times out; 0 (no timeout) is the default.
|
||||
func (o *ClientOptions) SetWriteTimeout(t time.Duration) *ClientOptions {
|
||||
o.WriteTimeout = t
|
||||
return o
|
||||
}
|
||||
|
||||
// SetConnectTimeout limits how long the client will wait when trying to open a connection
|
||||
// to an MQTT server before timing out and failing the attempt. A duration of 0 never times out.
|
||||
// Default 30 seconds. Currently only operational on TCP/TLS connections.
|
||||
func (o *ClientOptions) SetConnectTimeout(t time.Duration) *ClientOptions {
|
||||
o.ConnectTimeout = t
|
||||
return o
|
||||
}
|
||||
|
||||
// SetMaxReconnectInterval sets the maximum time that will be waited between reconnection attempts
// when the connection is lost.
|
||||
func (o *ClientOptions) SetMaxReconnectInterval(t time.Duration) *ClientOptions {
|
||||
o.MaxReconnectInterval = t
|
||||
return o
|
||||
}
|
||||
|
||||
// SetAutoReconnect sets whether the automatic reconnection logic should be used
// when the connection is lost. Even if it is disabled, the ConnectionLostHandler is
// still called.
|
||||
func (o *ClientOptions) SetAutoReconnect(a bool) *ClientOptions {
|
||||
o.AutoReconnect = a
|
||||
return o
|
||||
}
@ -1,57 +0,0 @@
package packets
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//ConnackPacket is an internal representation of the fields of the
|
||||
//Connack MQTT packet
|
||||
type ConnackPacket struct {
|
||||
FixedHeader
|
||||
TopicNameCompression byte
|
||||
ReturnCode byte
|
||||
uuid uuid.UUID
|
||||
}
|
||||
|
||||
func (ca *ConnackPacket) String() string {
|
||||
str := fmt.Sprintf("%s\n", ca.FixedHeader)
|
||||
str += fmt.Sprintf("returncode: %d", ca.ReturnCode)
|
||||
return str
|
||||
}
|
||||
|
||||
func (ca *ConnackPacket) Write(w io.Writer) error {
|
||||
var body bytes.Buffer
|
||||
var err error
|
||||
|
||||
body.WriteByte(ca.TopicNameCompression)
|
||||
body.WriteByte(ca.ReturnCode)
|
||||
ca.FixedHeader.RemainingLength = 2
|
||||
packet := ca.FixedHeader.pack()
|
||||
packet.Write(body.Bytes())
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
func (ca *ConnackPacket) Unpack(b io.Reader) {
|
||||
ca.TopicNameCompression = decodeByte(b)
|
||||
ca.ReturnCode = decodeByte(b)
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
func (ca *ConnackPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: 0}
|
||||
}
|
||||
|
||||
//UUID returns the unique ID assigned to the ControlPacket when
|
||||
//it was originally received. Note: this is not related to the
|
||||
//MessageID field for MQTT packets
|
||||
func (ca *ConnackPacket) UUID() uuid.UUID {
|
||||
return ca.uuid
|
||||
}
@ -1,128 +0,0 @@
package packets
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//ConnectPacket is an internal representation of the fields of the
|
||||
//Connect MQTT packet
|
||||
type ConnectPacket struct {
|
||||
FixedHeader
|
||||
ProtocolName string
|
||||
ProtocolVersion byte
|
||||
CleanSession bool
|
||||
WillFlag bool
|
||||
WillQos byte
|
||||
WillRetain bool
|
||||
UsernameFlag bool
|
||||
PasswordFlag bool
|
||||
ReservedBit byte
|
||||
KeepaliveTimer uint16
|
||||
|
||||
ClientIdentifier string
|
||||
WillTopic string
|
||||
WillMessage []byte
|
||||
Username string
|
||||
Password []byte
|
||||
uuid uuid.UUID
|
||||
}
|
||||
|
||||
func (c *ConnectPacket) String() string {
|
||||
str := fmt.Sprintf("%s\n", c.FixedHeader)
|
||||
str += fmt.Sprintf("protocolversion: %d protocolname: %s cleansession: %t willflag: %t WillQos: %d WillRetain: %t Usernameflag: %t Passwordflag: %t keepalivetimer: %d\nclientId: %s\nwilltopic: %s\nwillmessage: %s\nUsername: %s\nPassword: %s\n", c.ProtocolVersion, c.ProtocolName, c.CleanSession, c.WillFlag, c.WillQos, c.WillRetain, c.UsernameFlag, c.PasswordFlag, c.KeepaliveTimer, c.ClientIdentifier, c.WillTopic, c.WillMessage, c.Username, c.Password)
|
||||
return str
|
||||
}
|
||||
|
||||
func (c *ConnectPacket) Write(w io.Writer) error {
|
||||
var body bytes.Buffer
|
||||
var err error
|
||||
|
||||
body.Write(encodeString(c.ProtocolName))
|
||||
body.WriteByte(c.ProtocolVersion)
|
||||
body.WriteByte(boolToByte(c.CleanSession)<<1 | boolToByte(c.WillFlag)<<2 | c.WillQos<<3 | boolToByte(c.WillRetain)<<5 | boolToByte(c.PasswordFlag)<<6 | boolToByte(c.UsernameFlag)<<7)
|
||||
body.Write(encodeUint16(c.KeepaliveTimer))
|
||||
body.Write(encodeString(c.ClientIdentifier))
|
||||
if c.WillFlag {
|
||||
body.Write(encodeString(c.WillTopic))
|
||||
body.Write(encodeBytes(c.WillMessage))
|
||||
}
|
||||
if c.UsernameFlag {
|
||||
body.Write(encodeString(c.Username))
|
||||
}
|
||||
if c.PasswordFlag {
|
||||
body.Write(encodeBytes(c.Password))
|
||||
}
|
||||
c.FixedHeader.RemainingLength = body.Len()
|
||||
packet := c.FixedHeader.pack()
|
||||
packet.Write(body.Bytes())
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
func (c *ConnectPacket) Unpack(b io.Reader) {
|
||||
c.ProtocolName = decodeString(b)
|
||||
c.ProtocolVersion = decodeByte(b)
|
||||
options := decodeByte(b)
|
||||
c.ReservedBit = 1 & options
|
||||
c.CleanSession = 1&(options>>1) > 0
|
||||
c.WillFlag = 1&(options>>2) > 0
|
||||
c.WillQos = 3 & (options >> 3)
|
||||
c.WillRetain = 1&(options>>5) > 0
|
||||
c.PasswordFlag = 1&(options>>6) > 0
|
||||
c.UsernameFlag = 1&(options>>7) > 0
|
||||
c.KeepaliveTimer = decodeUint16(b)
|
||||
c.ClientIdentifier = decodeString(b)
|
||||
if c.WillFlag {
|
||||
c.WillTopic = decodeString(b)
|
||||
c.WillMessage = decodeBytes(b)
|
||||
}
|
||||
if c.UsernameFlag {
|
||||
c.Username = decodeString(b)
|
||||
}
|
||||
if c.PasswordFlag {
|
||||
c.Password = decodeBytes(b)
|
||||
}
|
||||
}
|
||||
|
||||
//Validate performs validation of the fields of a Connect packet
|
||||
func (c *ConnectPacket) Validate() byte {
|
||||
if c.PasswordFlag && !c.UsernameFlag {
|
||||
return ErrRefusedBadUsernameOrPassword
|
||||
}
|
||||
if c.ReservedBit != 0 {
|
||||
//Bad reserved bit
|
||||
return ErrProtocolViolation
|
||||
}
|
||||
if (c.ProtocolName == "MQIsdp" && c.ProtocolVersion != 3) || (c.ProtocolName == "MQTT" && c.ProtocolVersion != 4) {
|
||||
//Mismatched or unsupported protocol version
|
||||
return ErrRefusedBadProtocolVersion
|
||||
}
|
||||
if c.ProtocolName != "MQIsdp" && c.ProtocolName != "MQTT" {
|
||||
//Bad protocol name
|
||||
return ErrProtocolViolation
|
||||
}
|
||||
if len(c.ClientIdentifier) > 65535 || len(c.Username) > 65535 || len(c.Password) > 65535 {
|
||||
//Bad size field
|
||||
return ErrProtocolViolation
|
||||
}
|
||||
return Accepted
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
func (c *ConnectPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: 0}
|
||||
}
|
||||
|
||||
//UUID returns the unique ID assigned to the ControlPacket when
|
||||
//it was originally received. Note: this is not related to the
|
||||
//MessageID field for MQTT packets
|
||||
func (c *ConnectPacket) UUID() uuid.UUID {
|
||||
return c.uuid
|
||||
}
@ -1,44 +0,0 @@
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//DisconnectPacket is an internal representation of the fields of the
|
||||
//Disconnect MQTT packet
|
||||
type DisconnectPacket struct {
|
||||
FixedHeader
|
||||
uuid uuid.UUID
|
||||
}
|
||||
|
||||
func (d *DisconnectPacket) String() string {
|
||||
str := fmt.Sprintf("%s\n", d.FixedHeader)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *DisconnectPacket) Write(w io.Writer) error {
|
||||
packet := d.FixedHeader.pack()
|
||||
_, err := packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
func (d *DisconnectPacket) Unpack(b io.Reader) {
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
func (d *DisconnectPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: 0}
|
||||
}
|
||||
|
||||
//UUID returns the unique ID assigned to the ControlPacket when
|
||||
//it was originally received. Note: this is not related to the
|
||||
//MessageID field for MQTT packets
|
||||
func (d *DisconnectPacket) UUID() uuid.UUID {
|
||||
return d.uuid
|
||||
}
@ -1,324 +0,0 @@
package packets
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//ControlPacket defines the interface for structs intended to hold
|
||||
//decoded MQTT packets, either from being read or before being
|
||||
//written
|
||||
type ControlPacket interface {
|
||||
Write(io.Writer) error
|
||||
Unpack(io.Reader)
|
||||
String() string
|
||||
Details() Details
|
||||
UUID() uuid.UUID
|
||||
}
|
||||
|
||||
//PacketNames maps the constants for each of the MQTT packet types
|
||||
//to a string representation of their name.
|
||||
var PacketNames = map[uint8]string{
|
||||
1: "CONNECT",
|
||||
2: "CONNACK",
|
||||
3: "PUBLISH",
|
||||
4: "PUBACK",
|
||||
5: "PUBREC",
|
||||
6: "PUBREL",
|
||||
7: "PUBCOMP",
|
||||
8: "SUBSCRIBE",
|
||||
9: "SUBACK",
|
||||
10: "UNSUBSCRIBE",
|
||||
11: "UNSUBACK",
|
||||
12: "PINGREQ",
|
||||
13: "PINGRESP",
|
||||
14: "DISCONNECT",
|
||||
}
|
||||
|
||||
//Below are the constants assigned to each of the MQTT packet types
|
||||
const (
|
||||
Connect = 1
|
||||
Connack = 2
|
||||
Publish = 3
|
||||
Puback = 4
|
||||
Pubrec = 5
|
||||
Pubrel = 6
|
||||
Pubcomp = 7
|
||||
Subscribe = 8
|
||||
Suback = 9
|
||||
Unsubscribe = 10
|
||||
Unsuback = 11
|
||||
Pingreq = 12
|
||||
Pingresp = 13
|
||||
Disconnect = 14
|
||||
)
|
||||
|
||||
//Below are the const definitions for error codes returned by
|
||||
//Connect()
|
||||
const (
|
||||
Accepted = 0x00
|
||||
ErrRefusedBadProtocolVersion = 0x01
|
||||
ErrRefusedIDRejected = 0x02
|
||||
ErrRefusedServerUnavailable = 0x03
|
||||
ErrRefusedBadUsernameOrPassword = 0x04
|
||||
ErrRefusedNotAuthorised = 0x05
|
||||
ErrNetworkError = 0xFE
|
||||
ErrProtocolViolation = 0xFF
|
||||
)
|
||||
|
||||
//ConnackReturnCodes is a map of the error codes constants for Connect()
|
||||
//to a string representation of the error
|
||||
var ConnackReturnCodes = map[uint8]string{
|
||||
0: "Connection Accepted",
|
||||
1: "Connection Refused: Bad Protocol Version",
|
||||
2: "Connection Refused: Client Identifier Rejected",
|
||||
3: "Connection Refused: Server Unavailable",
|
||||
4: "Connection Refused: Username or Password in unknown format",
|
||||
5: "Connection Refused: Not Authorised",
|
||||
254: "Connection Error",
|
||||
255: "Connection Refused: Protocol Violation",
|
||||
}
|
||||
|
||||
//ConnErrors is a map of the errors codes constants for Connect()
|
||||
//to a Go error
|
||||
var ConnErrors = map[byte]error{
|
||||
Accepted: nil,
|
||||
ErrRefusedBadProtocolVersion:    errors.New("Unacceptable protocol version"),
|
||||
ErrRefusedIDRejected: errors.New("Identifier rejected"),
|
||||
ErrRefusedServerUnavailable: errors.New("Server Unavailable"),
|
||||
ErrRefusedBadUsernameOrPassword: errors.New("Bad user name or password"),
|
||||
ErrRefusedNotAuthorised: errors.New("Not Authorized"),
|
||||
ErrNetworkError: errors.New("Network Error"),
|
||||
ErrProtocolViolation: errors.New("Protocol Violation"),
|
||||
}
|
||||
|
||||
//ReadPacket takes an instance of an io.Reader (such as net.Conn) and attempts
|
||||
//to read an MQTT packet from the stream. It returns a ControlPacket
|
||||
//representing the decoded MQTT packet and an error. One of these returns will
|
||||
//always be nil, a nil ControlPacket indicating an error occurred.
|
||||
func ReadPacket(r io.Reader) (cp ControlPacket, err error) {
|
||||
var fh FixedHeader
|
||||
b := make([]byte, 1)
|
||||
|
||||
_, err = io.ReadFull(r, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fh.unpack(b[0], r)
|
||||
cp = NewControlPacketWithHeader(fh)
|
||||
if cp == nil {
|
||||
return nil, errors.New("Bad data from client")
|
||||
}
|
||||
packetBytes := make([]byte, fh.RemainingLength)
|
||||
_, err = io.ReadFull(r, packetBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cp.Unpack(bytes.NewBuffer(packetBytes))
|
||||
return cp, nil
|
||||
}
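Editor's sketch: ReadPacket accepts any io.Reader, so decoding a captured packet from a byte slice is simply the following (rawBytes is a placeholder for one complete MQTT packet; see TestConnectPacket further down this diff for a concrete byte sequence):

	cp, err := ReadPacket(bytes.NewBuffer(rawBytes))
	if err != nil {
		// handle a short read or an unrecognised packet type
	}
	fmt.Println(cp.String())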
|
||||
|
||||
//NewControlPacket is used to create a new ControlPacket of the type specified
|
||||
//by packetType, this is usually done by reference to the packet type constants
|
||||
//defined in packets.go. The newly created ControlPacket is empty and a pointer
|
||||
//is returned.
|
||||
func NewControlPacket(packetType byte) (cp ControlPacket) {
|
||||
switch packetType {
|
||||
case Connect:
|
||||
cp = &ConnectPacket{FixedHeader: FixedHeader{MessageType: Connect}, uuid: uuid.NewUUID()}
|
||||
case Connack:
|
||||
cp = &ConnackPacket{FixedHeader: FixedHeader{MessageType: Connack}, uuid: uuid.NewUUID()}
|
||||
case Disconnect:
|
||||
cp = &DisconnectPacket{FixedHeader: FixedHeader{MessageType: Disconnect}, uuid: uuid.NewUUID()}
|
||||
case Publish:
|
||||
cp = &PublishPacket{FixedHeader: FixedHeader{MessageType: Publish}, uuid: uuid.NewUUID()}
|
||||
case Puback:
|
||||
cp = &PubackPacket{FixedHeader: FixedHeader{MessageType: Puback}, uuid: uuid.NewUUID()}
|
||||
case Pubrec:
|
||||
cp = &PubrecPacket{FixedHeader: FixedHeader{MessageType: Pubrec}, uuid: uuid.NewUUID()}
|
||||
case Pubrel:
|
||||
cp = &PubrelPacket{FixedHeader: FixedHeader{MessageType: Pubrel, Qos: 1}, uuid: uuid.NewUUID()}
|
||||
case Pubcomp:
|
||||
cp = &PubcompPacket{FixedHeader: FixedHeader{MessageType: Pubcomp}, uuid: uuid.NewUUID()}
|
||||
case Subscribe:
|
||||
cp = &SubscribePacket{FixedHeader: FixedHeader{MessageType: Subscribe, Qos: 1}, uuid: uuid.NewUUID()}
|
||||
case Suback:
|
||||
cp = &SubackPacket{FixedHeader: FixedHeader{MessageType: Suback}, uuid: uuid.NewUUID()}
|
||||
case Unsubscribe:
|
||||
cp = &UnsubscribePacket{FixedHeader: FixedHeader{MessageType: Unsubscribe, Qos: 1}, uuid: uuid.NewUUID()}
|
||||
case Unsuback:
|
||||
cp = &UnsubackPacket{FixedHeader: FixedHeader{MessageType: Unsuback}, uuid: uuid.NewUUID()}
|
||||
case Pingreq:
|
||||
cp = &PingreqPacket{FixedHeader: FixedHeader{MessageType: Pingreq}, uuid: uuid.NewUUID()}
|
||||
case Pingresp:
|
||||
cp = &PingrespPacket{FixedHeader: FixedHeader{MessageType: Pingresp}, uuid: uuid.NewUUID()}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
return cp
|
||||
}
|
||||
|
||||
//NewControlPacketWithHeader is used to create a new ControlPacket of the type
|
||||
//specified within the FixedHeader that is passed to the function.
|
||||
//The newly created ControlPacket is empty and a pointer is returned.
|
||||
func NewControlPacketWithHeader(fh FixedHeader) (cp ControlPacket) {
|
||||
switch fh.MessageType {
|
||||
case Connect:
|
||||
cp = &ConnectPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
|
||||
case Connack:
|
||||
cp = &ConnackPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
|
||||
case Disconnect:
|
||||
cp = &DisconnectPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
|
||||
case Publish:
|
||||
cp = &PublishPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
|
||||
case Puback:
|
||||
cp = &PubackPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
|
||||
case Pubrec:
|
||||
cp = &PubrecPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
|
||||
case Pubrel:
|
||||
cp = &PubrelPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
|
||||
case Pubcomp:
|
||||
cp = &PubcompPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
|
||||
case Subscribe:
|
||||
cp = &SubscribePacket{FixedHeader: fh, uuid: uuid.NewUUID()}
|
||||
case Suback:
|
||||
cp = &SubackPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
|
||||
case Unsubscribe:
|
||||
cp = &UnsubscribePacket{FixedHeader: fh, uuid: uuid.NewUUID()}
|
||||
case Unsuback:
|
||||
cp = &UnsubackPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
|
||||
case Pingreq:
|
||||
cp = &PingreqPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
|
||||
case Pingresp:
|
||||
cp = &PingrespPacket{FixedHeader: fh, uuid: uuid.NewUUID()}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
return cp
|
||||
}
|
||||
|
||||
//Details struct returned by the Details() function called on
|
||||
//ControlPackets to present details of the Qos and MessageID
|
||||
//of the ControlPacket
|
||||
type Details struct {
|
||||
Qos byte
|
||||
MessageID uint16
|
||||
}
|
||||
|
||||
//FixedHeader is a struct to hold the decoded information from
|
||||
//the fixed header of an MQTT ControlPacket
|
||||
type FixedHeader struct {
|
||||
MessageType byte
|
||||
Dup bool
|
||||
Qos byte
|
||||
Retain bool
|
||||
RemainingLength int
|
||||
}
|
||||
|
||||
func (fh FixedHeader) String() string {
|
||||
return fmt.Sprintf("%s: dup: %t qos: %d retain: %t rLength: %d", PacketNames[fh.MessageType], fh.Dup, fh.Qos, fh.Retain, fh.RemainingLength)
|
||||
}
|
||||
|
||||
func boolToByte(b bool) byte {
|
||||
switch b {
|
||||
case true:
|
||||
return 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (fh *FixedHeader) pack() bytes.Buffer {
|
||||
var header bytes.Buffer
|
||||
header.WriteByte(fh.MessageType<<4 | boolToByte(fh.Dup)<<3 | fh.Qos<<1 | boolToByte(fh.Retain))
|
||||
header.Write(encodeLength(fh.RemainingLength))
|
||||
return header
|
||||
}
|
||||
|
||||
func (fh *FixedHeader) unpack(typeAndFlags byte, r io.Reader) {
|
||||
fh.MessageType = typeAndFlags >> 4
|
||||
fh.Dup = (typeAndFlags>>3)&0x01 > 0
|
||||
fh.Qos = (typeAndFlags >> 1) & 0x03
|
||||
fh.Retain = typeAndFlags&0x01 > 0
|
||||
fh.RemainingLength = decodeLength(r)
|
||||
}
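Worked example (editor's addition): pack encodes the first header byte as MessageType<<4 | Dup<<3 | Qos<<1 | Retain, so a PUBLISH (type 3) with Dup=false, Qos=1, Retain=true packs to 0x30 | 0x02 | 0x01 = 0x33; unpack reverses it with 0x33>>4 = 3, (0x33>>1)&3 = 1 and 0x33&1 = 1.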
|
||||
|
||||
func decodeByte(b io.Reader) byte {
|
||||
num := make([]byte, 1)
|
||||
b.Read(num)
|
||||
return num[0]
|
||||
}
|
||||
|
||||
func decodeUint16(b io.Reader) uint16 {
|
||||
num := make([]byte, 2)
|
||||
b.Read(num)
|
||||
return binary.BigEndian.Uint16(num)
|
||||
}
|
||||
|
||||
func encodeUint16(num uint16) []byte {
|
||||
bytes := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(bytes, num)
|
||||
return bytes
|
||||
}
|
||||
|
||||
func encodeString(field string) []byte {
|
||||
fieldLength := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(fieldLength, uint16(len(field)))
|
||||
return append(fieldLength, []byte(field)...)
|
||||
}
|
||||
|
||||
func decodeString(b io.Reader) string {
|
||||
fieldLength := decodeUint16(b)
|
||||
field := make([]byte, fieldLength)
|
||||
b.Read(field)
|
||||
return string(field)
|
||||
}
|
||||
|
||||
func decodeBytes(b io.Reader) []byte {
|
||||
fieldLength := decodeUint16(b)
|
||||
field := make([]byte, fieldLength)
|
||||
b.Read(field)
|
||||
return field
|
||||
}
|
||||
|
||||
func encodeBytes(field []byte) []byte {
|
||||
fieldLength := make([]byte, 2)
|
||||
binary.BigEndian.PutUint16(fieldLength, uint16(len(field)))
|
||||
return append(fieldLength, field...)
|
||||
}
|
||||
|
||||
func encodeLength(length int) []byte {
|
||||
var encLength []byte
|
||||
for {
|
||||
digit := byte(length % 128)
|
||||
length /= 128
|
||||
if length > 0 {
|
||||
digit |= 0x80
|
||||
}
|
||||
encLength = append(encLength, digit)
|
||||
if length == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return encLength
|
||||
}
|
||||
|
||||
func decodeLength(r io.Reader) int {
|
||||
var rLength uint32
|
||||
var multiplier uint32
|
||||
b := make([]byte, 1)
|
||||
for {
|
||||
io.ReadFull(r, b)
|
||||
digit := b[0]
|
||||
rLength |= uint32(digit&127) << multiplier
|
||||
if (digit & 128) == 0 {
|
||||
break
|
||||
}
|
||||
multiplier += 7
|
||||
}
|
||||
return int(rLength)
|
||||
}
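Worked example (editor's addition): encodeLength(321) emits 0xC1 0x02 — 321 % 128 = 65 gets the continuation bit (0x41 | 0x80 = 0xC1) and 321 / 128 = 2 becomes the final byte — and decodeLength reads it back as 65 + (2 << 7) = 321.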
@ -1,159 +0,0 @@
package packets
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPacketNames(t *testing.T) {
|
||||
if PacketNames[1] != "CONNECT" {
|
||||
t.Errorf("PacketNames[1] is %s, should be %s", PacketNames[1], "CONNECT")
|
||||
}
|
||||
if PacketNames[2] != "CONNACK" {
|
||||
t.Errorf("PacketNames[2] is %s, should be %s", PacketNames[2], "CONNACK")
|
||||
}
|
||||
if PacketNames[3] != "PUBLISH" {
|
||||
t.Errorf("PacketNames[3] is %s, should be %s", PacketNames[3], "PUBLISH")
|
||||
}
|
||||
if PacketNames[4] != "PUBACK" {
|
||||
t.Errorf("PacketNames[4] is %s, should be %s", PacketNames[4], "PUBACK")
|
||||
}
|
||||
if PacketNames[5] != "PUBREC" {
|
||||
t.Errorf("PacketNames[5] is %s, should be %s", PacketNames[5], "PUBREC")
|
||||
}
|
||||
if PacketNames[6] != "PUBREL" {
|
||||
t.Errorf("PacketNames[6] is %s, should be %s", PacketNames[6], "PUBREL")
|
||||
}
|
||||
if PacketNames[7] != "PUBCOMP" {
|
||||
t.Errorf("PacketNames[7] is %s, should be %s", PacketNames[7], "PUBCOMP")
|
||||
}
|
||||
if PacketNames[8] != "SUBSCRIBE" {
|
||||
t.Errorf("PacketNames[8] is %s, should be %s", PacketNames[8], "SUBSCRIBE")
|
||||
}
|
||||
if PacketNames[9] != "SUBACK" {
|
||||
t.Errorf("PacketNames[9] is %s, should be %s", PacketNames[9], "SUBACK")
|
||||
}
|
||||
if PacketNames[10] != "UNSUBSCRIBE" {
|
||||
t.Errorf("PacketNames[10] is %s, should be %s", PacketNames[10], "UNSUBSCRIBE")
|
||||
}
|
||||
if PacketNames[11] != "UNSUBACK" {
|
||||
t.Errorf("PacketNames[11] is %s, should be %s", PacketNames[11], "UNSUBACK")
|
||||
}
|
||||
if PacketNames[12] != "PINGREQ" {
|
||||
t.Errorf("PacketNames[12] is %s, should be %s", PacketNames[12], "PINGREQ")
|
||||
}
|
||||
if PacketNames[13] != "PINGRESP" {
|
||||
t.Errorf("PacketNames[13] is %s, should be %s", PacketNames[13], "PINGRESP")
|
||||
}
|
||||
if PacketNames[14] != "DISCONNECT" {
|
||||
t.Errorf("PacketNames[14] is %s, should be %s", PacketNames[14], "DISCONNECT")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPacketConsts(t *testing.T) {
|
||||
if Connect != 1 {
|
||||
t.Errorf("Const for Connect is %d, should be %d", Connect, 1)
|
||||
}
|
||||
if Connack != 2 {
|
||||
t.Errorf("Const for Connack is %d, should be %d", Connack, 2)
|
||||
}
|
||||
if Publish != 3 {
|
||||
t.Errorf("Const for Publish is %d, should be %d", Publish, 3)
|
||||
}
|
||||
if Puback != 4 {
|
||||
t.Errorf("Const for Puback is %d, should be %d", Puback, 4)
|
||||
}
|
||||
if Pubrec != 5 {
|
||||
t.Errorf("Const for Pubrec is %d, should be %d", Pubrec, 5)
|
||||
}
|
||||
if Pubrel != 6 {
|
||||
t.Errorf("Const for Pubrel is %d, should be %d", Pubrel, 6)
|
||||
}
|
||||
if Pubcomp != 7 {
|
||||
t.Errorf("Const for Pubcomp is %d, should be %d", Pubcomp, 7)
|
||||
}
|
||||
if Subscribe != 8 {
|
||||
t.Errorf("Const for Subscribe is %d, should be %d", Subscribe, 8)
|
||||
}
|
||||
if Suback != 9 {
|
||||
t.Errorf("Const for Suback is %d, should be %d", Suback, 9)
|
||||
}
|
||||
if Unsubscribe != 10 {
|
||||
t.Errorf("Const for Unsubscribe is %d, should be %d", Unsubscribe, 10)
|
||||
}
|
||||
if Unsuback != 11 {
|
||||
t.Errorf("Const for Unsuback is %d, should be %d", Unsuback, 11)
|
||||
}
|
||||
if Pingreq != 12 {
|
||||
t.Errorf("Const for Pingreq is %d, should be %d", Pingreq, 12)
|
||||
}
|
||||
if Pingresp != 13 {
|
||||
t.Errorf("Const for Pingresp is %d, should be %d", Pingresp, 13)
|
||||
}
|
||||
if Disconnect != 14 {
|
||||
t.Errorf("Const for Disconnect is %d, should be %d", Disconnect, 14)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnackConsts(t *testing.T) {
|
||||
if Accepted != 0x00 {
|
||||
t.Errorf("Const for Accepted is %d, should be %d", Accepted, 0)
|
||||
}
|
||||
if ErrRefusedBadProtocolVersion != 0x01 {
|
||||
t.Errorf("Const for RefusedBadProtocolVersion is %d, should be %d", ErrRefusedBadProtocolVersion, 1)
|
||||
}
|
||||
if ErrRefusedIDRejected != 0x02 {
|
||||
t.Errorf("Const for RefusedIDRejected is %d, should be %d", ErrRefusedIDRejected, 2)
|
||||
}
|
||||
if ErrRefusedServerUnavailable != 0x03 {
|
||||
t.Errorf("Const for RefusedServerUnavailable is %d, should be %d", ErrRefusedServerUnavailable, 3)
|
||||
}
|
||||
if ErrRefusedBadUsernameOrPassword != 0x04 {
|
||||
t.Errorf("Const for RefusedBadUsernameOrPassword is %d, should be %d", ErrRefusedBadUsernameOrPassword, 4)
|
||||
}
|
||||
if ErrRefusedNotAuthorised != 0x05 {
|
||||
t.Errorf("Const for RefusedNotAuthorised is %d, should be %d", ErrRefusedNotAuthorised, 5)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConnectPacket(t *testing.T) {
|
||||
connectPacketBytes := bytes.NewBuffer([]byte{16, 52, 0, 4, 77, 81, 84, 84, 4, 204, 0, 0, 0, 0, 0, 4, 116, 101, 115, 116, 0, 12, 84, 101, 115, 116, 32, 80, 97, 121, 108, 111, 97, 100, 0, 8, 116, 101, 115, 116, 117, 115, 101, 114, 0, 8, 116, 101, 115, 116, 112, 97, 115, 115})
|
||||
packet, err := ReadPacket(connectPacketBytes)
|
||||
if err != nil {
|
||||
t.Fatalf("Error reading packet: %s", err.Error())
|
||||
}
|
||||
cp := packet.(*ConnectPacket)
|
||||
if cp.ProtocolName != "MQTT" {
|
||||
t.Errorf("Connect Packet ProtocolName is %s, should be %s", cp.ProtocolName, "MQTT")
|
||||
}
|
||||
if cp.ProtocolVersion != 4 {
|
||||
t.Errorf("Connect Packet ProtocolVersion is %d, should be %d", cp.ProtocolVersion, 4)
|
||||
}
|
||||
if cp.UsernameFlag != true {
|
||||
t.Errorf("Connect Packet UsernameFlag is %t, should be %t", cp.UsernameFlag, true)
|
||||
}
|
||||
if cp.Username != "testuser" {
|
||||
t.Errorf("Connect Packet Username is %s, should be %s", cp.Username, "testuser")
|
||||
}
|
||||
if cp.PasswordFlag != true {
|
||||
t.Errorf("Connect Packet PasswordFlag is %t, should be %t", cp.PasswordFlag, true)
|
||||
}
|
||||
if string(cp.Password) != "testpass" {
|
||||
t.Errorf("Connect Packet Password is %s, should be %s", string(cp.Password), "testpass")
|
||||
}
|
||||
if cp.WillFlag != true {
|
||||
t.Errorf("Connect Packet WillFlag is %t, should be %t", cp.WillFlag, true)
|
||||
}
|
||||
if cp.WillTopic != "test" {
|
||||
t.Errorf("Connect Packet WillTopic is %s, should be %s", cp.WillTopic, "test")
|
||||
}
|
||||
if cp.WillQos != 1 {
|
||||
t.Errorf("Connect Packet WillQos is %d, should be %d", cp.WillQos, 1)
|
||||
}
|
||||
if cp.WillRetain != false {
|
||||
t.Errorf("Connect Packet WillRetain is %t, should be %t", cp.WillRetain, false)
|
||||
}
|
||||
if string(cp.WillMessage) != "Test Payload" {
|
||||
t.Errorf("Connect Packet WillMessage is %s, should be %s", string(cp.WillMessage), "Test Payload")
|
||||
}
|
||||
}
@ -1,44 +0,0 @@
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//PingreqPacket is an internal representation of the fields of the
|
||||
//Pingreq MQTT packet
|
||||
type PingreqPacket struct {
|
||||
FixedHeader
|
||||
uuid uuid.UUID
|
||||
}
|
||||
|
||||
func (pr *PingreqPacket) String() string {
|
||||
str := fmt.Sprintf("%s", pr.FixedHeader)
|
||||
return str
|
||||
}
|
||||
|
||||
func (pr *PingreqPacket) Write(w io.Writer) error {
|
||||
packet := pr.FixedHeader.pack()
|
||||
_, err := packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
func (pr *PingreqPacket) Unpack(b io.Reader) {
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
func (pr *PingreqPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: 0}
|
||||
}
|
||||
|
||||
//UUID returns the unique ID assigned to the ControlPacket when
|
||||
//it was originally received. Note: this is not related to the
|
||||
//MessageID field for MQTT packets
|
||||
func (pr *PingreqPacket) UUID() uuid.UUID {
|
||||
return pr.uuid
|
||||
}
@ -1,44 +0,0 @@
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//PingrespPacket is an internal representation of the fields of the
|
||||
//Pingresp MQTT packet
|
||||
type PingrespPacket struct {
|
||||
FixedHeader
|
||||
uuid uuid.UUID
|
||||
}
|
||||
|
||||
func (pr *PingrespPacket) String() string {
|
||||
str := fmt.Sprintf("%s", pr.FixedHeader)
|
||||
return str
|
||||
}
|
||||
|
||||
func (pr *PingrespPacket) Write(w io.Writer) error {
|
||||
packet := pr.FixedHeader.pack()
|
||||
_, err := packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
func (pr *PingrespPacket) Unpack(b io.Reader) {
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
func (pr *PingrespPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: 0}
|
||||
}
|
||||
|
||||
//UUID returns the unique ID assigned to the ControlPacket when
|
||||
//it was originally received. Note: this is not related to the
|
||||
//MessageID field for MQTT packets
|
||||
func (pr *PingrespPacket) UUID() uuid.UUID {
|
||||
return pr.uuid
|
||||
}
@ -1,50 +0,0 @@
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//PubackPacket is an internal representation of the fields of the
|
||||
//Puback MQTT packet
|
||||
type PubackPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
uuid uuid.UUID
|
||||
}
|
||||
|
||||
func (pa *PubackPacket) String() string {
|
||||
str := fmt.Sprintf("%s\n", pa.FixedHeader)
|
||||
str += fmt.Sprintf("messageID: %d", pa.MessageID)
|
||||
return str
|
||||
}
|
||||
|
||||
func (pa *PubackPacket) Write(w io.Writer) error {
|
||||
var err error
|
||||
pa.FixedHeader.RemainingLength = 2
|
||||
packet := pa.FixedHeader.pack()
|
||||
packet.Write(encodeUint16(pa.MessageID))
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
func (pa *PubackPacket) Unpack(b io.Reader) {
|
||||
pa.MessageID = decodeUint16(b)
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
func (pa *PubackPacket) Details() Details {
|
||||
return Details{Qos: pa.Qos, MessageID: pa.MessageID}
|
||||
}
|
||||
|
||||
//UUID returns the unique ID assigned to the ControlPacket when
|
||||
//it was originally received. Note: this is not related to the
|
||||
//MessageID field for MQTT packets
|
||||
func (pa *PubackPacket) UUID() uuid.UUID {
|
||||
return pa.uuid
|
||||
}
@ -1,50 +0,0 @@
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//PubcompPacket is an internal representation of the fields of the
|
||||
//Pubcomp MQTT packet
|
||||
type PubcompPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
uuid uuid.UUID
|
||||
}
|
||||
|
||||
func (pc *PubcompPacket) String() string {
|
||||
str := fmt.Sprintf("%s\n", pc.FixedHeader)
|
||||
str += fmt.Sprintf("MessageID: %d", pc.MessageID)
|
||||
return str
|
||||
}
|
||||
|
||||
func (pc *PubcompPacket) Write(w io.Writer) error {
|
||||
var err error
|
||||
pc.FixedHeader.RemainingLength = 2
|
||||
packet := pc.FixedHeader.pack()
|
||||
packet.Write(encodeUint16(pc.MessageID))
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
func (pc *PubcompPacket) Unpack(b io.Reader) {
|
||||
pc.MessageID = decodeUint16(b)
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
func (pc *PubcompPacket) Details() Details {
|
||||
return Details{Qos: pc.Qos, MessageID: pc.MessageID}
|
||||
}
|
||||
|
||||
//UUID returns the unique ID assigned to the ControlPacket when
|
||||
//it was originally received. Note: this is not related to the
|
||||
//MessageID field for MQTT packets
|
||||
func (pc *PubcompPacket) UUID() uuid.UUID {
|
||||
return pc.uuid
|
||||
}
|
|
@ -1,82 +0,0 @@
|
|||
package packets
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//PublishPacket is an internal representation of the fields of the
|
||||
//Publish MQTT packet
|
||||
type PublishPacket struct {
|
||||
FixedHeader
|
||||
TopicName string
|
||||
MessageID uint16
|
||||
Payload []byte
|
||||
uuid uuid.UUID
|
||||
}
|
||||
|
||||
func (p *PublishPacket) String() string {
|
||||
str := fmt.Sprintf("%s\n", p.FixedHeader)
|
||||
str += fmt.Sprintf("topicName: %s MessageID: %d\n", p.TopicName, p.MessageID)
|
||||
str += fmt.Sprintf("payload: %s\n", string(p.Payload))
|
||||
return str
|
||||
}
|
||||
|
||||
func (p *PublishPacket) Write(w io.Writer) error {
|
||||
var body bytes.Buffer
|
||||
var err error
|
||||
|
||||
body.Write(encodeString(p.TopicName))
|
||||
if p.Qos > 0 {
|
||||
body.Write(encodeUint16(p.MessageID))
|
||||
}
|
||||
p.FixedHeader.RemainingLength = body.Len() + len(p.Payload)
|
||||
packet := p.FixedHeader.pack()
|
||||
packet.Write(body.Bytes())
|
||||
packet.Write(p.Payload)
|
||||
_, err = w.Write(packet.Bytes())
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
func (p *PublishPacket) Unpack(b io.Reader) {
|
||||
var payloadLength = p.FixedHeader.RemainingLength
|
||||
p.TopicName = decodeString(b)
|
||||
if p.Qos > 0 {
|
||||
p.MessageID = decodeUint16(b)
|
||||
payloadLength -= len(p.TopicName) + 4
|
||||
} else {
|
||||
payloadLength -= len(p.TopicName) + 2
|
||||
}
|
||||
p.Payload = make([]byte, payloadLength)
|
||||
b.Read(p.Payload)
|
||||
}
|
||||
|
||||
//Copy creates a new PublishPacket with the same topic and payload
|
||||
//but an empty fixed header, useful for when you want to deliver
|
||||
//a message with different properties such as Qos but the same
|
||||
//content
|
||||
func (p *PublishPacket) Copy() *PublishPacket {
|
||||
newP := NewControlPacket(Publish).(*PublishPacket)
|
||||
newP.TopicName = p.TopicName
|
||||
newP.Payload = p.Payload
|
||||
|
||||
return newP
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
func (p *PublishPacket) Details() Details {
|
||||
return Details{Qos: p.Qos, MessageID: p.MessageID}
|
||||
}
|
||||
|
||||
//UUID returns the unique ID assigned to the ControlPacket when
|
||||
//it was originally received. Note: this is not related to the
|
||||
//MessageID field for MQTT packets
|
||||
func (p *PublishPacket) UUID() uuid.UUID {
|
||||
return p.uuid
|
||||
}
|
|
@ -1,50 +0,0 @@
|
|||
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//PubrecPacket is an internal representation of the fields of the
|
||||
//Pubrec MQTT packet
|
||||
type PubrecPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
uuid uuid.UUID
|
||||
}
|
||||
|
||||
func (pr *PubrecPacket) String() string {
|
||||
str := fmt.Sprintf("%s\n", pr.FixedHeader)
|
||||
str += fmt.Sprintf("MessageID: %d", pr.MessageID)
|
||||
return str
|
||||
}
|
||||
|
||||
func (pr *PubrecPacket) Write(w io.Writer) error {
|
||||
var err error
|
||||
pr.FixedHeader.RemainingLength = 2
|
||||
packet := pr.FixedHeader.pack()
|
||||
packet.Write(encodeUint16(pr.MessageID))
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
func (pr *PubrecPacket) Unpack(b io.Reader) {
|
||||
pr.MessageID = decodeUint16(b)
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
func (pr *PubrecPacket) Details() Details {
|
||||
return Details{Qos: pr.Qos, MessageID: pr.MessageID}
|
||||
}
|
||||
|
||||
//UUID returns the unique ID assigned to the ControlPacket when
|
||||
//it was originally received. Note: this is not related to the
|
||||
//MessageID field for MQTT packets
|
||||
func (pr *PubrecPacket) UUID() uuid.UUID {
|
||||
return pr.uuid
|
||||
}
|
|
@ -1,50 +0,0 @@
|
|||
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//PubrelPacket is an internal representation of the fields of the
|
||||
//Pubrel MQTT packet
|
||||
type PubrelPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
uuid uuid.UUID
|
||||
}
|
||||
|
||||
func (pr *PubrelPacket) String() string {
|
||||
str := fmt.Sprintf("%s\n", pr.FixedHeader)
|
||||
str += fmt.Sprintf("MessageID: %d", pr.MessageID)
|
||||
return str
|
||||
}
|
||||
|
||||
func (pr *PubrelPacket) Write(w io.Writer) error {
|
||||
var err error
|
||||
pr.FixedHeader.RemainingLength = 2
|
||||
packet := pr.FixedHeader.pack()
|
||||
packet.Write(encodeUint16(pr.MessageID))
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
func (pr *PubrelPacket) Unpack(b io.Reader) {
|
||||
pr.MessageID = decodeUint16(b)
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
func (pr *PubrelPacket) Details() Details {
|
||||
return Details{Qos: pr.Qos, MessageID: pr.MessageID}
|
||||
}
|
||||
|
||||
//UUID returns the unique ID assigned to the ControlPacket when
|
||||
//it was originally received. Note: this is not related to the
|
||||
//MessageID field for MQTT packets
|
||||
func (pr *PubrelPacket) UUID() uuid.UUID {
|
||||
return pr.uuid
|
||||
}
|
|
@ -1,58 +0,0 @@
|
|||
package packets
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//SubackPacket is an internal representation of the fields of the
|
||||
//Suback MQTT packet
|
||||
type SubackPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
GrantedQoss []byte
|
||||
uuid uuid.UUID
|
||||
}
|
||||
|
||||
func (sa *SubackPacket) String() string {
|
||||
str := fmt.Sprintf("%s\n", sa.FixedHeader)
|
||||
str += fmt.Sprintf("MessageID: %d", sa.MessageID)
|
||||
return str
|
||||
}
|
||||
|
||||
func (sa *SubackPacket) Write(w io.Writer) error {
|
||||
var body bytes.Buffer
|
||||
var err error
|
||||
body.Write(encodeUint16(sa.MessageID))
|
||||
body.Write(sa.GrantedQoss)
|
||||
sa.FixedHeader.RemainingLength = body.Len()
|
||||
packet := sa.FixedHeader.pack()
|
||||
packet.Write(body.Bytes())
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
func (sa *SubackPacket) Unpack(b io.Reader) {
|
||||
var qosBuffer bytes.Buffer
|
||||
sa.MessageID = decodeUint16(b)
|
||||
qosBuffer.ReadFrom(b)
|
||||
sa.GrantedQoss = qosBuffer.Bytes()
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
func (sa *SubackPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: sa.MessageID}
|
||||
}
|
||||
|
||||
//UUID returns the unique ID assigned to the ControlPacket when
|
||||
//it was originally received. Note: this is not related to the
|
||||
//MessageID field for MQTT packets
|
||||
func (sa *SubackPacket) UUID() uuid.UUID {
|
||||
return sa.uuid
|
||||
}
|
|
@ -1,68 +0,0 @@
|
|||
package packets
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//SubscribePacket is an internal representation of the fields of the
|
||||
//Subscribe MQTT packet
|
||||
type SubscribePacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
Topics []string
|
||||
Qoss []byte
|
||||
uuid uuid.UUID
|
||||
}
|
||||
|
||||
func (s *SubscribePacket) String() string {
|
||||
str := fmt.Sprintf("%s\n", s.FixedHeader)
|
||||
str += fmt.Sprintf("MessageID: %d topics: %s", s.MessageID, s.Topics)
|
||||
return str
|
||||
}
|
||||
|
||||
func (s *SubscribePacket) Write(w io.Writer) error {
|
||||
var body bytes.Buffer
|
||||
var err error
|
||||
|
||||
body.Write(encodeUint16(s.MessageID))
|
||||
for i, topic := range s.Topics {
|
||||
body.Write(encodeString(topic))
|
||||
body.WriteByte(s.Qoss[i])
|
||||
}
|
||||
s.FixedHeader.RemainingLength = body.Len()
|
||||
packet := s.FixedHeader.pack()
|
||||
packet.Write(body.Bytes())
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
func (s *SubscribePacket) Unpack(b io.Reader) {
|
||||
s.MessageID = decodeUint16(b)
|
||||
payloadLength := s.FixedHeader.RemainingLength - 2
|
||||
for payloadLength > 0 {
|
||||
topic := decodeString(b)
|
||||
s.Topics = append(s.Topics, topic)
|
||||
qos := decodeByte(b)
|
||||
s.Qoss = append(s.Qoss, qos)
|
||||
payloadLength -= 2 + len(topic) + 1 //2 bytes of string length, plus string, plus 1 byte for Qos
|
||||
}
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
func (s *SubscribePacket) Details() Details {
|
||||
return Details{Qos: 1, MessageID: s.MessageID}
|
||||
}
|
||||
|
||||
//UUID returns the unique ID assigned to the ControlPacket when
|
||||
//it was originally received. Note: this is not related to the
|
||||
//MessageID field for MQTT packets
|
||||
func (s *SubscribePacket) UUID() uuid.UUID {
|
||||
return s.uuid
|
||||
}
|
|
@ -1,50 +0,0 @@
|
|||
package packets
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//UnsubackPacket is an internal representation of the fields of the
|
||||
//Unsuback MQTT packet
|
||||
type UnsubackPacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
uuid uuid.UUID
|
||||
}
|
||||
|
||||
func (ua *UnsubackPacket) String() string {
|
||||
str := fmt.Sprintf("%s\n", ua.FixedHeader)
|
||||
str += fmt.Sprintf("MessageID: %d", ua.MessageID)
|
||||
return str
|
||||
}
|
||||
|
||||
func (ua *UnsubackPacket) Write(w io.Writer) error {
|
||||
var err error
|
||||
ua.FixedHeader.RemainingLength = 2
|
||||
packet := ua.FixedHeader.pack()
|
||||
packet.Write(encodeUint16(ua.MessageID))
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
func (ua *UnsubackPacket) Unpack(b io.Reader) {
|
||||
ua.MessageID = decodeUint16(b)
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
func (ua *UnsubackPacket) Details() Details {
|
||||
return Details{Qos: 0, MessageID: ua.MessageID}
|
||||
}
|
||||
|
||||
//UUID returns the unique ID assigned to the ControlPacket when
|
||||
//it was originally received. Note: this is not related to the
|
||||
//MessageID field for MQTT packets
|
||||
func (ua *UnsubackPacket) UUID() uuid.UUID {
|
||||
return ua.uuid
|
||||
}
|
|
@ -1,61 +0,0 @@
|
|||
package packets
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/pborman/uuid"
|
||||
"io"
|
||||
)
|
||||
|
||||
//UnsubscribePacket is an internal representation of the fields of the
|
||||
//Unsubscribe MQTT packet
|
||||
type UnsubscribePacket struct {
|
||||
FixedHeader
|
||||
MessageID uint16
|
||||
Topics []string
|
||||
uuid uuid.UUID
|
||||
}
|
||||
|
||||
func (u *UnsubscribePacket) String() string {
|
||||
str := fmt.Sprintf("%s\n", u.FixedHeader)
|
||||
str += fmt.Sprintf("MessageID: %d", u.MessageID)
|
||||
return str
|
||||
}
|
||||
|
||||
func (u *UnsubscribePacket) Write(w io.Writer) error {
|
||||
var body bytes.Buffer
|
||||
var err error
|
||||
body.Write(encodeUint16(u.MessageID))
|
||||
for _, topic := range u.Topics {
|
||||
body.Write(encodeString(topic))
|
||||
}
|
||||
u.FixedHeader.RemainingLength = body.Len()
|
||||
packet := u.FixedHeader.pack()
|
||||
packet.Write(body.Bytes())
|
||||
_, err = packet.WriteTo(w)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//Unpack decodes the details of a ControlPacket after the fixed
|
||||
//header has been read
|
||||
func (u *UnsubscribePacket) Unpack(b io.Reader) {
|
||||
u.MessageID = decodeUint16(b)
|
||||
var topic string
|
||||
for topic = decodeString(b); topic != ""; topic = decodeString(b) {
|
||||
u.Topics = append(u.Topics, topic)
|
||||
}
|
||||
}
|
||||
|
||||
//Details returns a Details struct containing the Qos and
|
||||
//MessageID of this ControlPacket
|
||||
func (u *UnsubscribePacket) Details() Details {
|
||||
return Details{Qos: 1, MessageID: u.MessageID}
|
||||
}
|
||||
|
||||
//UUID returns the unique ID assigned to the ControlPacket when
|
||||
//it was originally received. Note: this is not related to the
|
||||
//MessageID field for MQTT packets
|
||||
func (u *UnsubscribePacket) UUID() uuid.UUID {
|
||||
return u.uuid
|
||||
}
|
|
@ -1,73 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type lastcontact struct {
|
||||
sync.Mutex
|
||||
lasttime time.Time
|
||||
}
|
||||
|
||||
func (l *lastcontact) update() {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
l.lasttime = time.Now()
|
||||
|
||||
}
|
||||
|
||||
func (l *lastcontact) get() time.Time {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
return l.lasttime
|
||||
}
|
||||
|
||||
func keepalive(c *Client) {
|
||||
DEBUG.Println(PNG, "keepalive starting")
|
||||
c.pingOutstanding = false
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-c.stop:
|
||||
DEBUG.Println(PNG, "keepalive stopped")
|
||||
c.workers.Done()
|
||||
return
|
||||
default:
|
||||
last := uint(time.Since(c.lastContact.get()).Seconds())
|
||||
//DEBUG.Printf("%s last contact: %d (timeout: %d)", PNG, last, uint(c.options.KeepAlive.Seconds()))
|
||||
if last > uint(c.options.KeepAlive.Seconds()) {
|
||||
if !c.pingOutstanding {
|
||||
DEBUG.Println(PNG, "keepalive sending ping")
|
||||
ping := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket)
|
||||
//We don't want to wait behind large messages being sent, the Write call
|
||||
//will block until it it able to send the packet.
|
||||
ping.Write(c.conn)
|
||||
c.pingOutstanding = true
|
||||
} else {
|
||||
CRITICAL.Println(PNG, "pingresp not received, disconnecting")
|
||||
c.workers.Done()
|
||||
c.internalConnLost(errors.New("pingresp not received, disconnecting"))
|
||||
return
|
||||
}
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,162 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// route is a type which associates MQTT Topic strings with a
|
||||
// callback to be executed upon the arrival of a message associated
|
||||
// with a subscription to that topic.
|
||||
type route struct {
|
||||
topic string
|
||||
callback MessageHandler
|
||||
}
|
||||
|
||||
// match takes a slice of strings which represent the route being tested having been split on '/'
|
||||
// separators, and a slice of strings representing the topic string in the published message, similarly
|
||||
// split.
|
||||
// The function determines if the topic string matches the route according to the MQTT topic rules
|
||||
// and returns a boolean of the outcome
|
||||
func match(route []string, topic []string) bool {
|
||||
if len(route) == 0 {
|
||||
if len(topic) == 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if len(topic) == 0 {
|
||||
if route[0] == "#" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if route[0] == "#" {
|
||||
return true
|
||||
}
|
||||
|
||||
if (route[0] == "+") || (route[0] == topic[0]) {
|
||||
return match(route[1:], topic[1:])
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func routeIncludesTopic(route, topic string) bool {
|
||||
return match(strings.Split(route, "/"), strings.Split(topic, "/"))
|
||||
}
|
||||
|
||||
// match takes the topic string of the published message and does a basic compare to the
|
||||
// string of the current Route, if they match it returns true
|
||||
func (r *route) match(topic string) bool {
|
||||
return r.topic == topic || routeIncludesTopic(r.topic, topic)
|
||||
}
|
||||
|
||||
type router struct {
|
||||
sync.RWMutex
|
||||
routes *list.List
|
||||
defaultHandler MessageHandler
|
||||
messages chan *packets.PublishPacket
|
||||
stop chan bool
|
||||
}
|
||||
|
||||
// newRouter returns a new instance of a Router and channel which can be used to tell the Router
|
||||
// to stop
|
||||
func newRouter() (*router, chan bool) {
|
||||
router := &router{routes: list.New(), messages: make(chan *packets.PublishPacket), stop: make(chan bool)}
|
||||
stop := router.stop
|
||||
return router, stop
|
||||
}
|
||||
|
||||
// addRoute takes a topic string and MessageHandler callback. It looks in the current list of
|
||||
// routes to see if there is already a matching Route. If there is it replaces the current
|
||||
// callback with the new one. If not it add a new entry to the list of Routes.
|
||||
func (r *router) addRoute(topic string, callback MessageHandler) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
for e := r.routes.Front(); e != nil; e = e.Next() {
|
||||
if e.Value.(*route).match(topic) {
|
||||
r := e.Value.(*route)
|
||||
r.callback = callback
|
||||
return
|
||||
}
|
||||
}
|
||||
r.routes.PushBack(&route{topic: topic, callback: callback})
|
||||
}
|
||||
|
||||
// deleteRoute takes a route string, looks for a matching Route in the list of Routes. If
|
||||
// found it removes the Route from the list.
|
||||
func (r *router) deleteRoute(topic string) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
for e := r.routes.Front(); e != nil; e = e.Next() {
|
||||
if e.Value.(*route).match(topic) {
|
||||
r.routes.Remove(e)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setDefaultHandler assigns a default callback that will be called if no matching Route
|
||||
// is found for an incoming Publish.
|
||||
func (r *router) setDefaultHandler(handler MessageHandler) {
|
||||
r.defaultHandler = handler
|
||||
}
|
||||
|
||||
// matchAndDispatch takes a channel of Message pointers as input and starts a go routine that
|
||||
// takes messages off the channel, matches them against the internal route list and calls the
|
||||
// associated callback (or the defaultHandler, if one exists and no other route matched). If
|
||||
// anything is sent down the stop channel the function will end.
|
||||
func (r *router) matchAndDispatch(messages <-chan *packets.PublishPacket, order bool, client *Client) {
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case message := <-messages:
|
||||
sent := false
|
||||
r.RLock()
|
||||
for e := r.routes.Front(); e != nil; e = e.Next() {
|
||||
if e.Value.(*route).match(message.TopicName) {
|
||||
if order {
|
||||
r.RUnlock()
|
||||
e.Value.(*route).callback(client, messageFromPublish(message))
|
||||
r.RLock()
|
||||
} else {
|
||||
go e.Value.(*route).callback(client, messageFromPublish(message))
|
||||
}
|
||||
sent = true
|
||||
}
|
||||
}
|
||||
r.RUnlock()
|
||||
if !sent && r.defaultHandler != nil {
|
||||
if order {
|
||||
r.RLock()
|
||||
r.defaultHandler(client, messageFromPublish(message))
|
||||
r.RUnlock()
|
||||
} else {
|
||||
go r.defaultHandler(client, messageFromPublish(message))
|
||||
}
|
||||
}
|
||||
case <-r.stop:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
)
|
||||
|
||||
func main() {
|
||||
opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883")
|
||||
c := MQTT.NewClient(opts)
|
||||
if token := c.Connect(); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
}
|
||||
for {
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
|
@ -1,10 +0,0 @@
|
|||
#!/bin/sh
|
||||
|
||||
go clean
|
||||
|
||||
for file in *.go
|
||||
do
|
||||
echo -n "Compiling $file ..."
|
||||
go build "$file"
|
||||
echo " done."
|
||||
done
|
|
@ -1,23 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
)
|
||||
|
||||
func main() {
|
||||
opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883")
|
||||
opts.SetCleanSession(true)
|
||||
|
||||
c := MQTT.NewClient(opts)
|
||||
if token := c.Connect(); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
}
|
||||
fmt.Println("plz mosquitto goes down now")
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
c.Disconnect(200)
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
|
@ -1,96 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
// This demonstrates how to implement your own Store interface and provide
|
||||
// it to the go-mqtt client.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
|
||||
)
|
||||
|
||||
// This NoOpStore type implements the go-mqtt/Store interface, which
|
||||
// allows it to be used by the go-mqtt client library. However, it is
|
||||
// highly recommended that you do not use this NoOpStore in production,
|
||||
// because it will NOT provide any sort of guaruntee of message delivery.
|
||||
type NoOpStore struct {
|
||||
// Contain nothing
|
||||
}
|
||||
|
||||
func (store *NoOpStore) Open() {
|
||||
// Do nothing
|
||||
}
|
||||
|
||||
func (store *NoOpStore) Put(string, packets.ControlPacket) {
|
||||
// Do nothing
|
||||
}
|
||||
|
||||
func (store *NoOpStore) Get(string) packets.ControlPacket {
|
||||
// Do nothing
|
||||
return nil
|
||||
}
|
||||
|
||||
func (store *NoOpStore) Del(string) {
|
||||
// Do nothing
|
||||
}
|
||||
|
||||
func (store *NoOpStore) All() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (store *NoOpStore) Close() {
|
||||
// Do Nothing
|
||||
}
|
||||
|
||||
func (store *NoOpStore) Reset() {
|
||||
// Do Nothing
|
||||
}
|
||||
|
||||
func main() {
|
||||
myNoOpStore := &NoOpStore{}
|
||||
|
||||
opts := MQTT.NewClientOptions()
|
||||
opts.AddBroker("tcp://iot.eclipse.org:1883")
|
||||
opts.SetClientID("custom-store")
|
||||
opts.SetStore(myNoOpStore)
|
||||
|
||||
var callback MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
|
||||
fmt.Printf("TOPIC: %s\n", msg.Topic())
|
||||
fmt.Printf("MSG: %s\n", msg.Payload())
|
||||
}
|
||||
|
||||
c := MQTT.NewClient(opts)
|
||||
if token := c.Connect(); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
}
|
||||
|
||||
c.Subscribe("/go-mqtt/sample", 0, callback)
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
text := fmt.Sprintf("this is msg #%d!", i)
|
||||
token := c.Publish("/go-mqtt/sample", 0, false, text)
|
||||
token.Wait()
|
||||
}
|
||||
|
||||
for i := 1; i < 5; i++ {
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
c.Disconnect(250)
|
||||
}
|
|
@ -1,745 +0,0 @@
|
|||
# Config file for mosquitto
|
||||
#
|
||||
# See mosquitto.conf(5) for more information.
|
||||
#
|
||||
# Default values are shown, uncomment to change.
|
||||
#
|
||||
# Use the # character to indicate a comment, but only if it is the
|
||||
# very first character on the line.
|
||||
|
||||
# =================================================================
|
||||
# General configuration
|
||||
# =================================================================
|
||||
|
||||
# Time in seconds to wait before resending an outgoing QoS=1 or
|
||||
# QoS=2 message.
|
||||
#retry_interval 20
|
||||
|
||||
# Time in seconds between updates of the $SYS tree.
|
||||
# Set to 0 to disable the publishing of the $SYS tree.
|
||||
#sys_interval 10
|
||||
|
||||
# Time in seconds between cleaning the internal message store of
|
||||
# unreferenced messages. Lower values will result in lower memory
|
||||
# usage but more processor time, higher values will have the
|
||||
# opposite effect.
|
||||
# Setting a value of 0 means the unreferenced messages will be
|
||||
# disposed of as quickly as possible.
|
||||
#store_clean_interval 10
|
||||
|
||||
# Write process id to a file. Default is a blank string which means
|
||||
# a pid file shouldn't be written.
|
||||
# This should be set to /var/run/mosquitto.pid if mosquitto is
|
||||
# being run automatically on boot with an init script and
|
||||
# start-stop-daemon or similar.
|
||||
#pid_file
|
||||
|
||||
# When run as root, drop privileges to this user and its primary
|
||||
# group.
|
||||
# Leave blank to stay as root, but this is not recommended.
|
||||
# If run as a non-root user, this setting has no effect.
|
||||
# Note that on Windows this has no effect and so mosquitto should
|
||||
# be started by the user you wish it to run as.
|
||||
#user mosquitto
|
||||
|
||||
# The maximum number of QoS 1 and 2 messages currently inflight per
|
||||
# client.
|
||||
# This includes messages that are partway through handshakes and
|
||||
# those that are being retried. Defaults to 20. Set to 0 for no
|
||||
# maximum. Setting to 1 will guarantee in-order delivery of QoS 1
|
||||
# and 2 messages.
|
||||
#max_inflight_messages 20
|
||||
|
||||
# The maximum number of QoS 1 and 2 messages to hold in a queue
|
||||
# above those that are currently in-flight. Defaults to 100. Set
|
||||
# to 0 for no maximum (not recommended).
|
||||
# See also queue_qos0_messages.
|
||||
#max_queued_messages 100
|
||||
|
||||
# Set to true to queue messages with QoS 0 when a persistent client is
|
||||
# disconnected. These messages are included in the limit imposed by
|
||||
# max_queued_messages.
|
||||
# Defaults to false.
|
||||
# This is a non-standard option for the MQTT v3.1 spec but is allowed in
|
||||
# v3.1.1.
|
||||
#queue_qos0_messages false
|
||||
|
||||
# This option sets the maximum publish payload size that the broker will allow.
|
||||
# Received messages that exceed this size will not be accepted by the broker.
|
||||
# The default value is 0, which means that all valid MQTT messages are
|
||||
# accepted. MQTT imposes a maximum payload size of 268435455 bytes.
|
||||
#message_size_limit 0
|
||||
|
||||
# This option controls whether a client is allowed to connect with a zero
|
||||
# length client id or not. This option only affects clients using MQTT v3.1.1
|
||||
# and later. If set to false, clients connecting with a zero length client id
|
||||
# are disconnected. If set to true, clients will be allocated a client id by
|
||||
# the broker. This means it is only useful for clients with clean session set
|
||||
# to true.
|
||||
#allow_zero_length_clientid true
|
||||
|
||||
# If allow_zero_length_clientid is true, this option allows you to set a prefix
|
||||
# to automatically generated client ids to aid visibility in logs.
|
||||
#auto_id_prefix
|
||||
|
||||
# This option allows persistent clients (those with clean session set to false)
|
||||
# to be removed if they do not reconnect within a certain time frame.
|
||||
#
|
||||
# This is a non-standard option in MQTT V3.1 but allowed in MQTT v3.1.1.
|
||||
#
|
||||
# Badly designed clients may set clean session to false whilst using a randomly
|
||||
# generated client id. This leads to persistent clients that will never
|
||||
# reconnect. This option allows these clients to be removed.
|
||||
#
|
||||
# The expiration period should be an integer followed by one of d w m y for
|
||||
# day, week, month and year respectively. For example
|
||||
#
|
||||
# persistent_client_expiration 2m
|
||||
# persistent_client_expiration 14d
|
||||
# persistent_client_expiration 1y
|
||||
#
|
||||
# The default if not set is to never expire persistent clients.
|
||||
#persistent_client_expiration
|
||||
|
||||
# If a client is subscribed to multiple subscriptions that overlap, e.g. foo/#
|
||||
# and foo/+/baz , then MQTT expects that when the broker receives a message on
|
||||
# a topic that matches both subscriptions, such as foo/bar/baz, then the client
|
||||
# should only receive the message once.
|
||||
# Mosquitto keeps track of which clients a message has been sent to in order to
|
||||
# meet this requirement. The allow_duplicate_messages option allows this
|
||||
# behaviour to be disabled, which may be useful if you have a large number of
|
||||
# clients subscribed to the same set of topics and are very concerned about
|
||||
# minimising memory usage.
|
||||
# It can be safely set to true if you know in advance that your clients will
|
||||
# never have overlapping subscriptions, otherwise your clients must be able to
|
||||
# correctly deal with duplicate messages even when then have QoS=2.
|
||||
#allow_duplicate_messages false
|
||||
|
||||
# The MQTT specification requires that the QoS of a message delivered to a
|
||||
# subscriber is never upgraded to match the QoS of the subscription. Enabling
|
||||
# this option changes this behaviour. If upgrade_outgoing_qos is set true,
|
||||
# messages sent to a subscriber will always match the QoS of its subscription.
|
||||
# This is a non-standard option explicitly disallowed by the spec.
|
||||
#upgrade_outgoing_qos false
|
||||
|
||||
# =================================================================
|
||||
# Default listener
|
||||
# =================================================================
|
||||
|
||||
# IP address/hostname to bind the default listener to. If not
|
||||
# given, the default listener will not be bound to a specific
|
||||
# address and so will be accessible to all network interfaces.
|
||||
# bind_address ip-address/host name
|
||||
#bind_address
|
||||
|
||||
# Port to use for the default listener.
|
||||
#port 1883
|
||||
|
||||
# The maximum number of client connections to allow. This is
|
||||
# a per listener setting.
|
||||
# Default is -1, which means unlimited connections.
|
||||
# Note that other process limits mean that unlimited connections
|
||||
# are not really possible. Typically the default maximum number of
|
||||
# connections possible is around 1024.
|
||||
#max_connections -1
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
# Certificate based SSL/TLS support
|
||||
# -----------------------------------------------------------------
|
||||
# The following options can be used to enable SSL/TLS support for
|
||||
# this listener. Note that the recommended port for MQTT over TLS
|
||||
# is 8883, but this must be set manually.
|
||||
#
|
||||
# See also the mosquitto-tls man page.
|
||||
|
||||
# At least one of cafile or capath must be defined. They both
|
||||
# define methods of accessing the PEM encoded Certificate
|
||||
# Authority certificates that have signed your server certificate
|
||||
# and that you wish to trust.
|
||||
# cafile defines the path to a file containing the CA certificates.
|
||||
# capath defines a directory that will be searched for files
|
||||
# containing the CA certificates. For capath to work correctly, the
|
||||
# certificate files must have ".crt" as the file ending and you must run
|
||||
# "c_rehash <path to capath>" each time you add/remove a certificate.
|
||||
#cafile
|
||||
#capath
|
||||
|
||||
# Path to the PEM encoded server certificate.
|
||||
#certfile
|
||||
|
||||
# Path to the PEM encoded keyfile.
|
||||
#keyfile
|
||||
|
||||
# This option defines the version of the TLS protocol to use for this listener.
|
||||
# The default value will always be the highest version that is available for
|
||||
# the version of openssl that the broker was compiled against. For openssl >=
|
||||
# 1.0.1 the valid values are tlsv1.2 tlsv1.1 and tlsv1. For openssl < 1.0.1 the
|
||||
# valid values are tlsv1.
|
||||
#tls_version
|
||||
|
||||
# By default a TLS enabled listener will operate in a similar fashion to a
|
||||
# https enabled web server, in that the server has a certificate signed by a CA
|
||||
# and the client will verify that it is a trusted certificate. The overall aim
|
||||
# is encryption of the network traffic. By setting require_certificate to true,
|
||||
# the client must provide a valid certificate in order for the network
|
||||
# connection to proceed. This allows access to the broker to be controlled
|
||||
# outside of the mechanisms provided by MQTT.
|
||||
#require_certificate false
|
||||
|
||||
# If require_certificate is true, you may set use_identity_as_username to true
|
||||
# to use the CN value from the client certificate as a username. If this is
|
||||
# true, the password_file option will not be used for this listener.
|
||||
#use_identity_as_username false
|
||||
|
||||
# If you have require_certificate set to true, you can create a certificate
|
||||
# revocation list file to revoke access to particular client certificates. If
|
||||
# you have done this, use crlfile to point to the PEM encoded revocation file.
|
||||
#crlfile
|
||||
|
||||
# If you wish to control which encryption ciphers are used, use the ciphers
|
||||
# option. The list of available ciphers can be optained using the "openssl
|
||||
# ciphers" command and should be provided in the same format as the output of
|
||||
# that command.
|
||||
# If unset defaults to DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2:@STRENGTH
|
||||
#ciphers DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2:@STRENGTH
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
# Pre-shared-key based SSL/TLS support
|
||||
# -----------------------------------------------------------------
|
||||
# The following options can be used to enable PSK based SSL/TLS support for
|
||||
# this listener. Note that the recommended port for MQTT over TLS is 8883, but
|
||||
# this must be set manually.
|
||||
#
|
||||
# See also the mosquitto-tls man page and the "Certificate based SSL/TLS
|
||||
# support" section. Only one of certificate or PSK encryption support can be
|
||||
# enabled for any listener.
|
||||
|
||||
# The psk_hint option enables pre-shared-key support for this listener and also
|
||||
# acts as an identifier for this listener. The hint is sent to clients and may
|
||||
# be used locally to aid authentication. The hint is a free form string that
|
||||
# doesn't have much meaning in itself, so feel free to be creative.
|
||||
# If this option is provided, see psk_file to define the pre-shared keys to be
|
||||
# used or create a security plugin to handle them.
|
||||
#psk_hint
|
||||
|
||||
# Set use_identity_as_username to have the psk identity sent by the client used
|
||||
# as its username. Authentication will be carried out using the PSK rather than
|
||||
# the MQTT username/password and so password_file will not be used for this
|
||||
# listener.
|
||||
#use_identity_as_username false
|
||||
|
||||
# When using PSK, the encryption ciphers used will be chosen from the list of
|
||||
# available PSK ciphers. If you want to control which ciphers are available,
|
||||
# use the "ciphers" option. The list of available ciphers can be optained
|
||||
# using the "openssl ciphers" command and should be provided in the same format
|
||||
# as the output of that command.
|
||||
#ciphers
|
||||
|
||||
# =================================================================
|
||||
# Extra listeners
|
||||
# =================================================================
|
||||
|
||||
# Listen on a port/ip address combination. By using this variable
|
||||
# multiple times, mosquitto can listen on more than one port. If
|
||||
# this variable is used and neither bind_address nor port given,
|
||||
# then the default listener will not be started.
|
||||
# The port number to listen on must be given. Optionally, an ip
|
||||
# address or host name may be supplied as a second argument. In
|
||||
# this case, mosquitto will attempt to bind the listener to that
|
||||
# address and so restrict access to the associated network and
|
||||
# interface. By default, mosquitto will listen on all interfaces.
|
||||
# listener port-number [ip address/host name]
|
||||
#listener
|
||||
|
||||
# The maximum number of client connections to allow. This is
|
||||
# a per listener setting.
|
||||
# Default is -1, which means unlimited connections.
|
||||
# Note that other process limits mean that unlimited connections
|
||||
# are not really possible. Typically the default maximum number of
|
||||
# connections possible is around 1024.
|
||||
#max_connections -1
|
||||
|
||||
# The listener can be restricted to operating within a topic hierarchy using
|
||||
# the mount_point option. This is achieved be prefixing the mount_point string
|
||||
# to all topics for any clients connected to this listener. This prefixing only
|
||||
# happens internally to the broker; the client will not see the prefix.
|
||||
#mount_point
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
# Certificate based SSL/TLS support
|
||||
# -----------------------------------------------------------------
|
||||
# The following options can be used to enable certificate based SSL/TLS support
|
||||
# for this listener. Note that the recommended port for MQTT over TLS is 8883,
|
||||
# but this must be set manually.
|
||||
#
|
||||
# See also the mosquitto-tls man page and the "Pre-shared-key based SSL/TLS
|
||||
# support" section. Only one of certificate or PSK encryption support can be
|
||||
# enabled for any listener.
|
||||
|
||||
# At least one of cafile or capath must be defined to enable certificate based
|
||||
# TLS encryption. They both define methods of accessing the PEM encoded
|
||||
# Certificate Authority certificates that have signed your server certificate
|
||||
# and that you wish to trust.
|
||||
# cafile defines the path to a file containing the CA certificates.
|
||||
# capath defines a directory that will be searched for files
|
||||
# containing the CA certificates. For capath to work correctly, the
|
||||
# certificate files must have ".crt" as the file ending and you must run
|
||||
# "c_rehash <path to capath>" each time you add/remove a certificate.
|
||||
#cafile
|
||||
#capath
|
||||
|
||||
# Path to the PEM encoded server certificate.
|
||||
#certfile
|
||||
|
||||
# Path to the PEM encoded keyfile.
|
||||
#keyfile
|
||||
|
||||
# By default an TLS enabled listener will operate in a similar fashion to a
|
||||
# https enabled web server, in that the server has a certificate signed by a CA
|
||||
# and the client will verify that it is a trusted certificate. The overall aim
|
||||
# is encryption of the network traffic. By setting require_certificate to true,
|
||||
# the client must provide a valid certificate in order for the network
|
||||
# connection to proceed. This allows access to the broker to be controlled
|
||||
# outside of the mechanisms provided by MQTT.
|
||||
#require_certificate false
|
||||
|
||||
# If require_certificate is true, you may set use_identity_as_username to true
|
||||
# to use the CN value from the client certificate as a username. If this is
|
||||
# true, the password_file option will not be used for this listener.
|
||||
#use_identity_as_username false
|
||||
|
||||
# If you have require_certificate set to true, you can create a certificate
|
||||
# revocation list file to revoke access to particular client certificates. If
|
||||
# you have done this, use crlfile to point to the PEM encoded revocation file.
|
||||
#crlfile
|
||||
|
||||
# If you wish to control which encryption ciphers are used, use the ciphers
|
||||
# option. The list of available ciphers can be optained using the "openssl
|
||||
# ciphers" command and should be provided in the same format as the output of
|
||||
# that command.
|
||||
#ciphers
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
# Pre-shared-key based SSL/TLS support
|
||||
# -----------------------------------------------------------------
|
||||
# The following options can be used to enable PSK based SSL/TLS support for
|
||||
# this listener. Note that the recommended port for MQTT over TLS is 8883, but
|
||||
# this must be set manually.
|
||||
#
|
||||
# See also the mosquitto-tls man page and the "Certificate based SSL/TLS
|
||||
# support" section. Only one of certificate or PSK encryption support can be
|
||||
# enabled for any listener.
|
||||
|
||||
# The psk_hint option enables pre-shared-key support for this listener and also
|
||||
# acts as an identifier for this listener. The hint is sent to clients and may
|
||||
# be used locally to aid authentication. The hint is a free form string that
|
||||
# doesn't have much meaning in itself, so feel free to be creative.
|
||||
# If this option is provided, see psk_file to define the pre-shared keys to be
|
||||
# used or create a security plugin to handle them.
|
||||
#psk_hint
|
||||
|
||||
# Set use_identity_as_username to have the psk identity sent by the client used
|
||||
# as its username. Authentication will be carried out using the PSK rather than
|
||||
# the MQTT username/password and so password_file will not be used for this
|
||||
# listener.
|
||||
#use_identity_as_username false
|
||||
|
||||
# When using PSK, the encryption ciphers used will be chosen from the list of
|
||||
# available PSK ciphers. If you want to control which ciphers are available,
|
||||
# use the "ciphers" option. The list of available ciphers can be optained
|
||||
# using the "openssl ciphers" command and should be provided in the same format
|
||||
# as the output of that command.
|
||||
#ciphers
|
||||
|
||||
# =================================================================
|
||||
# Persistence
|
||||
# =================================================================
|
||||
|
||||
# If persistence is enabled, save the in-memory database to disk
|
||||
# every autosave_interval seconds. If set to 0, the persistence
|
||||
# database will only be written when mosquitto exits. See also
|
||||
# autosave_on_changes.
|
||||
# Note that writing of the persistence database can be forced by
|
||||
# sending mosquitto a SIGUSR1 signal.
|
||||
#autosave_interval 1800
|
||||
|
||||
# If true, mosquitto will count the number of subscription changes, retained
|
||||
# messages received and queued messages and if the total exceeds
|
||||
# autosave_interval then the in-memory database will be saved to disk.
|
||||
# If false, mosquitto will save the in-memory database to disk by treating
|
||||
# autosave_interval as a time in seconds.
|
||||
#autosave_on_changes false
|
||||
|
||||
# Save persistent message data to disk (true/false).
|
||||
# This saves information about all messages, including
|
||||
# subscriptions, currently in-flight messages and retained
|
||||
# messages.
|
||||
# retained_persistence is a synonym for this option.
|
||||
#persistence false
|
||||
|
||||
# The filename to use for the persistent database, not including
|
||||
# the path.
|
||||
#persistence_file mosquitto.db
|
||||
|
||||
# Location for persistent database. Must include trailing /
|
||||
# Default is an empty string (current directory).
|
||||
# Set to e.g. /var/lib/mosquitto/ if running as a proper service on Linux or
|
||||
# similar.
|
||||
#persistence_location
|
||||
|
||||
# =================================================================
|
||||
# Logging
|
||||
# =================================================================
|
||||
|
||||
# Places to log to. Use multiple log_dest lines for multiple
|
||||
# logging destinations.
|
||||
# Possible destinations are: stdout stderr syslog topic file
|
||||
#
|
||||
# stdout and stderr log to the console on the named output.
|
||||
#
|
||||
# syslog uses the userspace syslog facility which usually ends up
|
||||
# in /var/log/messages or similar.
|
||||
#
|
||||
# topic logs to the broker topic '$SYS/broker/log/<severity>',
|
||||
# where severity is one of D, E, W, N, I, M which are debug, error,
|
||||
# warning, notice, information and message. Message type severity is used by
|
||||
# the subscribe/unsubscribe log_types and publishes log messages to
|
||||
# $SYS/broker/log/M/susbcribe or $SYS/broker/log/M/unsubscribe.
|
||||
#
|
||||
# The file destination requires an additional parameter which is the file to be
|
||||
# logged to, e.g. "log_dest file /var/log/mosquitto.log". The file will be
|
||||
# closed and reopened when the broker receives a HUP signal. Only a single file
|
||||
# destination may be configured.
|
||||
#
|
||||
# Note that if the broker is running as a Windows service it will default to
|
||||
# "log_dest none" and neither stdout nor stderr logging is available.
|
||||
# Use "log_dest none" if you wish to disable logging.
|
||||
log_dest stdout
|
||||
|
||||
# Types of messages to log. Use multiple log_type lines for logging
|
||||
# multiple types of messages.
|
||||
# Possible types are: debug, error, warning, notice, information,
|
||||
# none, subscribe, unsubscribe, all.
|
||||
# Note that debug type messages are for decoding the incoming/outgoing
|
||||
# network packets. They are not logged in "topics".
|
||||
#log_type error
|
||||
#log_type warning
|
||||
#log_type notice
|
||||
log_type information
|
||||
|
||||
# If set to true, client connection and disconnection messages will be included
|
||||
# in the log.
|
||||
#connection_messages true
|
||||
|
||||
# If set to true, add a timestamp value to each log message.
|
||||
#log_timestamp true
|
||||
|
||||
# =================================================================
|
||||
# Security
|
||||
# =================================================================
|
||||
|
||||
# If set, only clients that have a matching prefix on their
|
||||
# clientid will be allowed to connect to the broker. By default,
|
||||
# all clients may connect.
|
||||
# For example, setting "secure-" here would mean a client "secure-
|
||||
# client" could connect but another with clientid "mqtt" couldn't.
|
||||
#clientid_prefixes
|
||||
|
||||
# Boolean value that determines whether clients that connect
|
||||
# without providing a username are allowed to connect. If set to
|
||||
# false then a password file should be created (see the
|
||||
# password_file option) to control authenticated client access.
|
||||
# Defaults to true.
|
||||
#allow_anonymous true
|
||||
|
||||
# In addition to the clientid_prefixes, allow_anonymous and TLS
|
||||
# authentication options, username based authentication is also
|
||||
# possible. The default support is described in "Default
|
||||
# authentication and topic access control" below. The auth_plugin
|
||||
# allows another authentication method to be used.
|
||||
# Specify the path to the loadable plugin and see the
|
||||
# "Authentication and topic access plugin options" section below.
|
||||
#auth_plugin
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
# Default authentication and topic access control
|
||||
# -----------------------------------------------------------------
|
||||
|
||||
# Control access to the broker using a password file. This file can be
|
||||
# generated using the mosquitto_passwd utility. If TLS support is not compiled
|
||||
# into mosquitto (it is recommended that TLS support should be included) then
|
||||
# plain text passwords are used, in which case the file should be a text file
|
||||
# with lines in the format:
|
||||
# username:password
|
||||
# The password (and colon) may be omitted if desired, although this
|
||||
# offers very little in the way of security.
|
||||
#
|
||||
# See the TLS client require_certificate and use_identity_as_username options
|
||||
# for alternative authentication options.
|
||||
password_file pwfile.example
|
||||
|
||||
# Access may also be controlled using a pre-shared-key file. This requires
|
||||
# TLS-PSK support and a listener configured to use it. The file should be text
|
||||
# lines in the format:
|
||||
# identity:key
|
||||
# The key should be in hexadecimal format without a leading "0x".
|
||||
#psk_file
|
||||
|
||||
# Control access to topics on the broker using an access control list
|
||||
# file. If this parameter is defined then only the topics listed will
|
||||
# have access.
|
||||
# If the first character of a line of the ACL file is a # it is treated as a
|
||||
# comment.
|
||||
# Topic access is added with lines of the format:
|
||||
#
|
||||
# topic [read|write] <topic>
|
||||
#
|
||||
# The access type is controlled using "read" or "write". This parameter
|
||||
# is optional - if not given then the access is read/write.
|
||||
# <topic> can contain the + or # wildcards as in subscriptions.
|
||||
#
|
||||
# The first set of topics are applied to anonymous clients, assuming
|
||||
# allow_anonymous is true. User specific topic ACLs are added after a
|
||||
# user line as follows:
|
||||
#
|
||||
# user <username>
|
||||
#
|
||||
# The username referred to here is the same as in password_file. It is
|
||||
# not the clientid.
|
||||
#
|
||||
#
|
||||
# If is also possible to define ACLs based on pattern substitution within the
|
||||
# topic. The patterns available for substition are:
|
||||
#
|
||||
# %c to match the client id of the client
|
||||
# %u to match the username of the client
|
||||
#
|
||||
# The substitution pattern must be the only text for that level of hierarchy.
|
||||
#
|
||||
# The form is the same as for the topic keyword, but using pattern as the
|
||||
# keyword.
|
||||
# Pattern ACLs apply to all users even if the "user" keyword has previously
|
||||
# been given.
|
||||
#
|
||||
# If using bridges with usernames and ACLs, connection messages can be allowed
|
||||
# with the following pattern:
|
||||
# pattern write $SYS/broker/connection/%c/state
|
||||
#
|
||||
# pattern [read|write] <topic>
|
||||
#
|
||||
# Example:
|
||||
#
|
||||
# pattern write sensor/%u/data
|
||||
#
|
||||
#acl_file
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
# Authentication and topic access plugin options
|
||||
# -----------------------------------------------------------------
|
||||
|
||||
# If the auth_plugin option above is used, define options to pass to the
|
||||
# plugin here as described by the plugin instructions. All options named
|
||||
# using the format auth_opt_* will be passed to the plugin, for example:
|
||||
#
|
||||
# auth_opt_db_host
|
||||
# auth_opt_db_port
|
||||
# auth_opt_db_username
|
||||
# auth_opt_db_password
|
||||
|
||||
|
||||
# =================================================================
|
||||
# Bridges
|
||||
# =================================================================
|
||||
|
||||
# A bridge is a way of connecting multiple MQTT brokers together.
|
||||
# Create a new bridge using the "connection" option as described below. Set
|
||||
# options for the bridges using the remaining parameters. You must specify the
|
||||
# address and at least one topic to subscribe to.
|
||||
# Each connection must have a unique name.
|
||||
# The address line may have multiple host address and ports specified. See
|
||||
# below in the round_robin description for more details on bridge behaviour if
|
||||
# multiple addresses are used.
|
||||
# The direction that the topic will be shared can be chosen by
|
||||
# specifying out, in or both, where the default value is out.
|
||||
# The QoS level of the bridged communication can be specified with the next
|
||||
# topic option. The default QoS level is 0, to change the QoS the topic
|
||||
# direction must also be given.
|
||||
# The local and remote prefix options allow a topic to be remapped when it is
|
||||
# bridged to/from the remote broker. This provides the ability to place a topic
|
||||
# tree in an appropriate location.
|
||||
# For more details see the mosquitto.conf man page.
|
||||
# Multiple topics can be specified per connection, but be careful
|
||||
# not to create any loops.
|
||||
# If you are using bridges with cleansession set to false (the default), then
|
||||
# you may get unexpected behaviour from incoming topics if you change what
|
||||
# topics you are subscribing to. This is because the remote broker keeps the
|
||||
# subscription for the old topic. If you have this problem, connect your bridge
|
||||
# with cleansession set to true, then reconnect with cleansession set to false
|
||||
# as normal.
|
||||
#connection <name>
|
||||
#address <host>[:<port>] [<host>[:<port>]]
|
||||
#topic <topic> [[[out | in | both] qos-level] local-prefix remote-prefix]
|
||||
|
||||
# If the bridge has more than one address given in the address/addresses
|
||||
# configuration, the round_robin option defines the behaviour of the bridge on
|
||||
# a failure of the bridge connection. If round_robin is false, the default
|
||||
# value, then the first address is treated as the main bridge connection. If
|
||||
# the connection fails, the other secondary addresses will be attempted in
|
||||
# turn. Whilst connected to a secondary bridge, the bridge will periodically
|
||||
# attempt to reconnect to the main bridge until successful.
|
||||
# If round_robin is true, then all addresses are treated as equals. If a
|
||||
# connection fails, the next address will be tried and if successful will
|
||||
# remain connected until it fails
|
||||
#round_robin false
|
||||
|
||||
# Set the client id for this bridge connection. If not defined,
|
||||
# this defaults to 'name.hostname' where name is the connection
|
||||
# name and hostname is the hostname of this computer.
|
||||
#clientid
|
||||
|
||||
# Set the clean session variable for this bridge.
|
||||
# When set to true, when the bridge disconnects for any reason, all
|
||||
# messages and subscriptions will be cleaned up on the remote
|
||||
# broker. Note that with cleansession set to true, there may be a
|
||||
# significant amount of retained messages sent when the bridge
|
||||
# reconnects after losing its connection.
|
||||
# When set to false, the subscriptions and messages are kept on the
|
||||
# remote broker, and delivered when the bridge reconnects.
|
||||
#cleansession false
|
||||
|
||||
# If set to true, publish notification messages to the local and remote brokers
|
||||
# giving information about the state of the bridge connection. Retained
|
||||
# messages are published to the topic $SYS/broker/connection/<clientid>/state
|
||||
# unless the notification_topic option is used.
|
||||
# If the message is 1 then the connection is active, or 0 if the connection has
|
||||
# failed.
|
||||
#notifications true
|
||||
|
||||
# Choose the topic on which notification messages for this bridge are
|
||||
# published. If not set, messages are published on the topic
|
||||
# $SYS/broker/connection/<clientid>/state
|
||||
#notification_topic
|
||||
|
||||
# Set the keepalive interval for this bridge connection, in
|
||||
# seconds.
|
||||
#keepalive_interval 60
|
||||
|
||||
# Set the start type of the bridge. This controls how the bridge starts and
|
||||
# can be one of three types: automatic, lazy and once. Note that RSMB provides
|
||||
# a fourth start type "manual" which isn't currently supported by mosquitto.
|
||||
#
|
||||
# "automatic" is the default start type and means that the bridge connection
|
||||
# will be started automatically when the broker starts and also restarted
|
||||
# after a short delay (30 seconds) if the connection fails.
|
||||
#
|
||||
# Bridges using the "lazy" start type will be started automatically when the
|
||||
# number of queued messages exceeds the number set with the "threshold"
|
||||
# parameter. It will be stopped automatically after the time set by the
|
||||
# "idle_timeout" parameter. Use this start type if you wish the connection to
|
||||
# only be active when it is needed.
|
||||
#
|
||||
# A bridge using the "once" start type will be started automatically when the
|
||||
# broker starts but will not be restarted if the connection fails.
|
||||
#start_type automatic
|
||||
|
||||
# Set the amount of time a bridge using the automatic start type will wait
|
||||
# until attempting to reconnect. Defaults to 30 seconds.
|
||||
#restart_timeout 30
|
||||
|
||||
# Set the amount of time a bridge using the lazy start type must be idle before
|
||||
# it will be stopped. Defaults to 60 seconds.
|
||||
#idle_timeout 60
|
||||
|
||||
# Set the number of messages that need to be queued for a bridge with lazy
|
||||
# start type to be restarted. Defaults to 10 messages.
|
||||
# Must be less than max_queued_messages.
|
||||
#threshold 10
|
||||
|
||||
# If try_private is set to true, the bridge will attempt to indicate to the
|
||||
# remote broker that it is a bridge not an ordinary client. If successful, this
|
||||
# means that loop detection will be more effective and that retained messages
|
||||
# will be propagated correctly. Not all brokers support this feature so it may
|
||||
# be necessary to set try_private to false if your bridge does not connect
|
||||
# properly.
|
||||
#try_private true
|
||||
|
||||
# Set the username to use when connecting to an MQTT v3.1 broker
|
||||
# that requires authentication.
|
||||
#username
|
||||
|
||||
# Set the password to use when connecting to an MQTT v3.1 broker
|
||||
# that requires authentication. This option is only used if
|
||||
# username is also set.
|
||||
#password
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
# Certificate based SSL/TLS support
|
||||
# -----------------------------------------------------------------
|
||||
# Either bridge_cafile or bridge_capath must be defined to enable TLS support
|
||||
# for this bridge.
|
||||
# bridge_cafile defines the path to a file containing the
|
||||
# Certificate Authority certificates that have signed the remote broker
|
||||
# certificate.
|
||||
# bridge_capath defines a directory that will be searched for files containing
|
||||
# the CA certificates. For bridge_capath to work correctly, the certificate
|
||||
# files must have ".crt" as the file ending and you must run "c_rehash <path to
|
||||
# capath>" each time you add/remove a certificate.
|
||||
#bridge_cafile
|
||||
#bridge_capath
|
||||
|
||||
# Path to the PEM encoded client certificate, if required by the remote broker.
|
||||
#bridge_certfile
|
||||
|
||||
# Path to the PEM encoded client private key, if required by the remote broker.
|
||||
#bridge_keyfile
|
||||
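# A sketch of the certificate options combined; the paths below are
# illustrative placeholders only:
#bridge_cafile /etc/mosquitto/certs/ca.crt
#bridge_certfile /etc/mosquitto/certs/bridge.crt
#bridge_keyfile /etc/mosquitto/certs/bridge.key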
|
||||
# When using certificate based encryption, bridge_insecure disables
|
||||
# verification of the server hostname in the server certificate. This can be
|
||||
# useful when testing initial server configurations, but makes it possible for
|
||||
# a malicious third party to impersonate your server through DNS spoofing, for
|
||||
# example. Use this option in testing only. If you need to resort to using this
|
||||
# option in a production environment, your setup is at fault and there is no
|
||||
# point using encryption.
|
||||
#bridge_insecure false
|
||||
|
||||
# -----------------------------------------------------------------
|
||||
# PSK based SSL/TLS support
|
||||
# -----------------------------------------------------------------
|
||||
# Pre-shared-key encryption provides an alternative to certificate based
|
||||
# encryption. A bridge can be configured to use PSK with the bridge_identity
|
||||
# and bridge_psk options. These are the client PSK identity, and pre-shared-key
|
||||
# in hexadecimal format with no "0x". Only one of certificate and PSK based
|
||||
# encryption can be used on a single bridge at a time.
|
||||
#bridge_identity
|
||||
#bridge_psk
|
||||
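# A sketch of the PSK options together; the identity and key below are
# illustrative placeholders (the key is plain hex, no "0x" prefix):
#bridge_identity example-bridge
#bridge_psk 0123456789abcdef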
|
||||
|
||||
# =================================================================
|
||||
# External config files
|
||||
# =================================================================
|
||||
|
||||
# External configuration files may be included by using the
|
||||
# include_dir option. This defines a directory that will be searched
|
||||
# for config files. All files that end in '.conf' will be loaded as
|
||||
# a configuration file. It is best to have this as the last option
|
||||
# in the main file. This option will only be processed from the main
|
||||
# configuration file. The directory specified must not contain the
|
||||
# main configuration file.
|
||||
#include_dir
|
||||
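# A sketch with an illustrative placeholder path:
#include_dir /etc/mosquitto/conf.d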
|
||||
# =================================================================
|
||||
# Unsupported rsmb options - for the future
|
||||
# =================================================================
|
||||
|
||||
#addresses
|
||||
#round_robin
|
||||
|
||||
# =================================================================
|
||||
# rsmb options - unlikely to ever be supported
|
||||
# =================================================================
|
||||
|
||||
#ffdc_output
|
||||
#max_log_entries
|
||||
#trace_level
|
||||
#trace_output
|
|
@ -1,3 +0,0 @@
|
|||
roger:$6$clQ4Ocu312S0qWgl$Cv2wUxgEN73c6C6jlBkswqR4AkHsvDLWvtEXZZ8NpsBLgP1WAo/qA+WXcmEN/mjDNgdUwcxRAveqNMs2xUVQYA==
|
||||
sub_client:$6$U+qg0/32F0g2Fh+n$fBPSkq/rfNyEQ/TkEjRgwGTTVBpvNhKSyGShovH9KHewsvJ731tD5Zx26IHhR5RYCICt0L9qBW0/KK31UkCliw==
|
||||
pub_client:$6$vxQ89y+7WrsnL2yn$fSPMmEZn9TSrC8s/jaPmxJ9NijWpkP2e7bMJLz78JXR1vW2x8+T3FZ23byJA6xs5Mt+LeOybAHwcUv0OCl40rA==
|
|
@ -1,105 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
/*----------------------------------------------------------------------
|
||||
This sample is designed to demonstrate the ability to set individual
|
||||
callbacks on a per-subscription basis. There are three handlers in use:
|
||||
brokerLoadHandler - $SYS/broker/load/#
|
||||
brokerConnectionHandler - $SYS/broker/connection/#
|
||||
brokerClientHandler - $SYS/broker/clients/#
|
||||
The client will receive 100 messages total from those subscriptions,
|
||||
and then print the total number of messages received from each.
|
||||
It may take a few moments for the sample to complete running, as it
|
||||
must wait for messages to be published.
|
||||
-----------------------------------------------------------------------*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
)
|
||||
|
||||
var brokerLoad = make(chan bool)
|
||||
var brokerConnection = make(chan bool)
|
||||
var brokerClients = make(chan bool)
|
||||
|
||||
func brokerLoadHandler(client *MQTT.Client, msg MQTT.Message) {
|
||||
brokerLoad <- true
|
||||
fmt.Printf("BrokerLoadHandler ")
|
||||
fmt.Printf("[%s] ", msg.Topic())
|
||||
fmt.Printf("%s\n", msg.Payload())
|
||||
}
|
||||
|
||||
func brokerConnectionHandler(client *MQTT.Client, msg MQTT.Message) {
|
||||
brokerConnection <- true
|
||||
fmt.Printf("BrokerConnectionHandler ")
|
||||
fmt.Printf("[%s] ", msg.Topic())
|
||||
fmt.Printf("%s\n", msg.Payload())
|
||||
}
|
||||
|
||||
func brokerClientsHandler(client *MQTT.Client, msg MQTT.Message) {
|
||||
brokerClients <- true
|
||||
fmt.Printf("BrokerClientsHandler ")
|
||||
fmt.Printf("[%s] ", msg.Topic())
|
||||
fmt.Printf("%s\n", msg.Payload())
|
||||
}
|
||||
|
||||
func main() {
|
||||
opts := MQTT.NewClientOptions().AddBroker("tcp://iot.eclipse.org:1883").SetClientID("router-sample")
|
||||
opts.SetCleanSession(true)
|
||||
|
||||
c := MQTT.NewClient(opts)
|
||||
if token := c.Connect(); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
}
|
||||
|
||||
if token := c.Subscribe("$SYS/broker/load/#", 0, brokerLoadHandler); token.Wait() && token.Error() != nil {
|
||||
fmt.Println(token.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if token := c.Subscribe("$SYS/broker/connection/#", 0, brokerConnectionHandler); token.Wait() && token.Error() != nil {
|
||||
fmt.Println(token.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if token := c.Subscribe("$SYS/broker/clients/#", 0, brokerClientsHandler); token.Wait() && token.Error() != nil {
|
||||
fmt.Println(token.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
loadCount := 0
|
||||
connectionCount := 0
|
||||
clientsCount := 0
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
select {
|
||||
case <-brokerLoad:
|
||||
loadCount++
|
||||
case <-brokerConnection:
|
||||
connectionCount++
|
||||
case <-brokerClients:
|
||||
clientsCount++
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Received %3d Broker Load messages\n", loadCount)
|
||||
fmt.Printf("Received %3d Broker Connection messages\n", connectionCount)
|
||||
fmt.Printf("Received %3d Broker Clients messages\n", clientsCount)
|
||||
|
||||
c.Disconnect(250)
|
||||
}
|
|
@ -1,130 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
)
|
||||
|
||||
/*
|
||||
Options:
|
||||
[-help] Display help
|
||||
[-action pub|sub] Action pub (publish) or sub (subscribe)
|
||||
[-message <message>] Payload to send
|
||||
[-num <number>] Number of messages to send or receive
|
||||
[-qos 0|1|2] Quality of Service
|
||||
[-clean] CleanSession (true if -clean is present)
|
||||
[-id <clientid>] ClientID
|
||||
[-user <user>] User
|
||||
[-password <password>] Password
|
||||
[-broker <uri>] Broker URI
|
||||
[-topic <topic>] Topic
|
||||
[-store <path>] Store Directory
|
||||
|
||||
*/
|
||||
|
||||
func main() {
|
||||
topic := flag.String("topic", "", "The topic name to/from which to publish/subscribe")
|
||||
broker := flag.String("broker", "tcp://iot.eclipse.org:1883", "The broker URI. ex: tcp://10.10.1.1:1883")
|
||||
password := flag.String("password", "", "The password (optional)")
|
||||
user := flag.String("user", "", "The User (optional)")
|
||||
id := flag.String("id", "testgoid", "The ClientID (optional)")
|
||||
cleansess := flag.Bool("clean", false, "Set Clean Session (default false)")
|
||||
qos := flag.Int("qos", 0, "The Quality of Service 0,1,2 (default 0)")
|
||||
num := flag.Int("num", 1, "The number of messages to publish or subscribe (default 1)")
|
||||
payload := flag.String("message", "", "The message text to publish (default empty)")
|
||||
action := flag.String("action", "", "Action publish or subscribe (required)")
|
||||
store := flag.String("store", ":memory:", "The Store Directory (default use memory store)")
|
||||
flag.Parse()
|
||||
|
||||
if *action != "pub" && *action != "sub" {
|
||||
fmt.Println("Invalid setting for -action, must be pub or sub")
|
||||
return
|
||||
}
|
||||
|
||||
if *topic == "" {
|
||||
fmt.Println("Invalid setting for -topic, must not be empty")
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Sample Info:\n")
|
||||
fmt.Printf("\taction: %s\n", *action)
|
||||
fmt.Printf("\tbroker: %s\n", *broker)
|
||||
fmt.Printf("\tclientid: %s\n", *id)
|
||||
fmt.Printf("\tuser: %s\n", *user)
|
||||
fmt.Printf("\tpassword: %s\n", *password)
|
||||
fmt.Printf("\ttopic: %s\n", *topic)
|
||||
fmt.Printf("\tmessage: %s\n", *payload)
|
||||
fmt.Printf("\tqos: %d\n", *qos)
|
||||
fmt.Printf("\tcleansess: %v\n", *cleansess)
|
||||
fmt.Printf("\tnum: %d\n", *num)
|
||||
fmt.Printf("\tstore: %s\n", *store)
|
||||
|
||||
opts := MQTT.NewClientOptions()
|
||||
opts.AddBroker(*broker)
|
||||
opts.SetClientID(*id)
|
||||
opts.SetUsername(*user)
|
||||
opts.SetPassword(*password)
|
||||
opts.SetCleanSession(*cleansess)
|
||||
if *store != ":memory:" {
|
||||
opts.SetStore(MQTT.NewFileStore(*store))
|
||||
}
|
||||
|
||||
if *action == "pub" {
|
||||
client := MQTT.NewClient(opts)
|
||||
if token := client.Connect(); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
}
|
||||
fmt.Println("Sample Publisher Started")
|
||||
for i := 0; i < *num; i++ {
|
||||
fmt.Println("---- doing publish ----")
|
||||
token := client.Publish(*topic, byte(*qos), false, *payload)
|
||||
token.Wait()
|
||||
}
|
||||
|
||||
client.Disconnect(250)
|
||||
fmt.Println("Sample Publisher Disconnected")
|
||||
} else {
|
||||
receiveCount := 0
|
||||
choke := make(chan [2]string)
|
||||
|
||||
opts.SetDefaultPublishHandler(func(client *MQTT.Client, msg MQTT.Message) {
|
||||
choke <- [2]string{msg.Topic(), string(msg.Payload())}
|
||||
})
|
||||
|
||||
client := MQTT.NewClient(opts)
|
||||
if token := client.Connect(); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
}
|
||||
|
||||
if token := client.Subscribe(*topic, byte(*qos), nil); token.Wait() && token.Error() != nil {
|
||||
fmt.Println(token.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for receiveCount < *num {
|
||||
incoming := <-choke
|
||||
fmt.Printf("RECEIVED TOPIC: %s MESSAGE: %s\n", incoming[0], incoming[1])
|
||||
receiveCount++
|
||||
}
|
||||
|
||||
client.Disconnect(250)
|
||||
fmt.Println("Sample Subscriber Disconnected")
|
||||
}
|
||||
}
|
|
@ -1,150 +0,0 @@
|
|||
Certificate:
|
||||
Data:
|
||||
Version: 3 (0x2)
|
||||
Serial Number: 1 (0x1)
|
||||
Signature Algorithm: sha1WithRSAEncryption
|
||||
Issuer: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy CA
|
||||
Validity
|
||||
Not Before: Oct 21 19:24:23 2013 GMT
|
||||
Not After : Sep 25 19:24:23 2018 GMT
|
||||
Subject: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy CA
|
||||
Subject Public Key Info:
|
||||
Public Key Algorithm: rsaEncryption
|
||||
Public-Key: (2048 bit)
|
||||
Modulus:
|
||||
00:c2:d1:d0:31:dc:93:c3:ad:88:0d:f8:93:fe:cc:
|
||||
aa:04:1d:85:aa:c3:bb:bd:87:04:f0:42:67:14:34:
|
||||
4a:56:94:2b:bf:d0:6b:72:30:38:39:35:20:8c:e3:
|
||||
7e:65:82:b0:7e:3e:1d:f1:18:82:b7:d6:19:59:43:
|
||||
ed:81:be:eb:51:44:fc:77:9e:37:ad:e1:a0:18:b9:
|
||||
4b:59:79:90:81:a4:e4:52:2f:fc:e2:ff:98:10:5e:
|
||||
d5:13:9a:16:62:1a:e0:cb:ab:1d:ae:da:d1:40:d4:
|
||||
97:b1:e6:e3:f1:97:2c:2a:52:73:ab:d0:a2:15:f3:
|
||||
1e:9a:b0:67:d0:62:67:4b:74:b0:bb:8f:ef:9e:32:
|
||||
6a:4c:27:4e:82:7c:16:66:ce:06:e9:a3:d9:36:4f:
|
||||
f4:3e:bc:80:00:93:c1:ca:31:cf:03:68:d4:e5:8b:
|
||||
38:45:b6:1b:35:b0:c0:e9:4a:62:75:83:01:aa:b9:
|
||||
c1:0b:c0:ee:97:c0:73:23:cd:34:ec:bb:3c:95:35:
|
||||
c8:2d:69:ff:86:d8:1f:c8:04:7e:18:de:62:c2:4b:
|
||||
37:c6:aa:8e:03:bf:2b:0d:97:20:2a:75:47:ec:98:
|
||||
29:3c:64:52:ef:91:8b:63:0f:6a:f8:c2:9d:08:6a:
|
||||
61:68:6f:64:9a:56:b2:0a:bc:7b:59:3d:7f:fd:ba:
|
||||
12:4b
|
||||
Exponent: 65537 (0x10001)
|
||||
X509v3 extensions:
|
||||
X509v3 Subject Key Identifier:
|
||||
5B:BB:3E:8E:2D:90:AD:AE:58:07:FF:53:00:18:98:FF:44:84:4C:BA
|
||||
X509v3 Authority Key Identifier:
|
||||
keyid:5B:BB:3E:8E:2D:90:AD:AE:58:07:FF:53:00:18:98:FF:44:84:4C:BA
|
||||
|
||||
X509v3 Basic Constraints:
|
||||
CA:TRUE
|
||||
Signature Algorithm: sha1WithRSAEncryption
|
||||
3c:89:0b:bd:49:10:a6:1a:f6:2a:4b:5f:02:3d:ee:f3:19:4f:
|
||||
c9:10:79:9c:01:ef:88:22:3d:03:5b:1a:14:46:b6:7f:9b:af:
|
||||
a5:99:1a:d4:d4:9b:d6:6f:c1:fe:96:8f:9a:9e:47:42:b4:ee:
|
||||
21:56:6a:c4:92:38:6c:81:cd:8e:31:43:86:7c:97:15:90:80:
|
||||
d8:21:f0:46:be:2a:2f:f2:96:07:85:74:a8:fa:1b:78:8f:80:
|
||||
c1:5e:bc:d9:06:c2:33:9e:8e:f9:08:dd:43:7b:6f:5a:22:67:
|
||||
46:78:5d:fb:4a:4e:c2:c6:29:94:17:53:a6:c5:a9:d6:67:06:
|
||||
4f:07:ef:da:5b:45:21:83:cb:31:b2:dc:dc:ac:13:19:98:3f:
|
||||
98:5f:2c:b4:b4:da:d4:43:d7:a9:1a:6e:b6:cf:be:85:a8:80:
|
||||
1f:8a:c1:95:8a:83:a4:af:d2:23:4a:b6:18:87:4e:28:31:36:
|
||||
03:2c:bf:e4:9e:b6:75:fd:c4:68:ed:4d:d5:a8:fa:a5:81:13:
|
||||
17:1c:43:67:02:1c:d0:e6:00:6e:8b:13:e6:60:1f:ba:40:78:
|
||||
93:25:ca:59:5a:71:cc:58:d4:52:63:1d:b3:3c:ce:37:f1:89:
|
||||
78:fc:13:fa:b3:ea:22:af:17:68:8a:a1:59:57:f5:1a:49:6e:
|
||||
b9:f6:5f:b3
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDizCCAnOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO
|
||||
MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO
|
||||
MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy
|
||||
M1oXDTE4MDkyNTE5MjQyM1owYDELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15
|
||||
MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15
|
||||
MREwDwYDVQQDDAhEdW1teSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
|
||||
ggEBAMLR0DHck8OtiA34k/7MqgQdharDu72HBPBCZxQ0SlaUK7/Qa3IwODk1IIzj
|
||||
fmWCsH4+HfEYgrfWGVlD7YG+61FE/HeeN63hoBi5S1l5kIGk5FIv/OL/mBBe1ROa
|
||||
FmIa4MurHa7a0UDUl7Hm4/GXLCpSc6vQohXzHpqwZ9BiZ0t0sLuP754yakwnToJ8
|
||||
FmbOBumj2TZP9D68gACTwcoxzwNo1OWLOEW2GzWwwOlKYnWDAaq5wQvA7pfAcyPN
|
||||
NOy7PJU1yC1p/4bYH8gEfhjeYsJLN8aqjgO/Kw2XICp1R+yYKTxkUu+Ri2MPavjC
|
||||
nQhqYWhvZJpWsgq8e1k9f/26EksCAwEAAaNQME4wHQYDVR0OBBYEFFu7Po4tkK2u
|
||||
WAf/UwAYmP9EhEy6MB8GA1UdIwQYMBaAFFu7Po4tkK2uWAf/UwAYmP9EhEy6MAwG
|
||||
A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADyJC71JEKYa9ipLXwI97vMZ
|
||||
T8kQeZwB74giPQNbGhRGtn+br6WZGtTUm9Zvwf6Wj5qeR0K07iFWasSSOGyBzY4x
|
||||
Q4Z8lxWQgNgh8Ea+Ki/ylgeFdKj6G3iPgMFevNkGwjOejvkI3UN7b1oiZ0Z4XftK
|
||||
TsLGKZQXU6bFqdZnBk8H79pbRSGDyzGy3NysExmYP5hfLLS02tRD16kabrbPvoWo
|
||||
gB+KwZWKg6Sv0iNKthiHTigxNgMsv+SetnX9xGjtTdWo+qWBExccQ2cCHNDmAG6L
|
||||
E+ZgH7pAeJMlyllaccxY1FJjHbM8zjfxiXj8E/qz6iKvF2iKoVlX9RpJbrn2X7M=
|
||||
-----END CERTIFICATE-----
|
||||
Certificate:
|
||||
Data:
|
||||
Version: 3 (0x2)
|
||||
Serial Number: 1 (0x1)
|
||||
Signature Algorithm: sha1WithRSAEncryption
|
||||
Issuer: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy CA
|
||||
Validity
|
||||
Not Before: Oct 21 19:24:23 2013 GMT
|
||||
Not After : Sep 25 19:24:23 2018 GMT
|
||||
Subject: C=US, ST=Dummy, L=Dummy, O=Dummy, OU=Dummy, CN=Dummy Intermediate CA
|
||||
Subject Public Key Info:
|
||||
Public Key Algorithm: rsaEncryption
|
||||
Public-Key: (2048 bit)
|
||||
Modulus:
|
||||
00:cf:7d:92:07:a5:56:1b:6f:4c:f3:34:c2:12:c2:
|
||||
34:62:3b:69:aa:a6:0c:c6:70:5b:93:bc:dc:41:98:
|
||||
61:87:61:36:be:8c:08:dd:31:a9:33:76:d3:66:3e:
|
||||
77:60:1e:ed:9e:e1:e5:ef:bf:17:91:ac:0c:63:07:
|
||||
01:ab:30:67:bc:16:a6:2f:79:f0:61:8c:79:2d:3c:
|
||||
98:60:74:61:c4:5f:60:44:85:71:92:9d:cc:7b:14:
|
||||
39:74:aa:44:f9:9f:ae:f6:c7:8d:c3:01:47:53:24:
|
||||
ac:7b:a2:f6:c5:7d:65:37:40:0b:20:c8:d4:14:cd:
|
||||
f8:f4:57:ea:23:70:f4:e3:99:2b:1c:9a:67:37:ed:
|
||||
93:c7:a7:7c:86:90:f7:ae:fc:6f:4b:18:dc:d5:eb:
|
||||
f3:68:33:d6:78:14:d1:ca:a7:06:7d:75:34:f6:c0:
|
||||
d4:15:1b:21:2b:78:d9:76:24:a5:f0:c6:13:c8:1e:
|
||||
4a:c8:ca:77:34:4e:f8:fa:49:5f:6c:e1:66:a8:65:
|
||||
f0:8c:bc:44:20:03:ac:af:4a:61:a5:39:48:51:1b:
|
||||
cb:d8:22:29:60:27:47:42:fc:bf:6a:77:65:58:09:
|
||||
20:82:1c:d1:16:5e:5a:18:ea:99:61:8e:93:94:27:
|
||||
30:20:dd:44:03:50:43:b4:ec:a3:0f:ee:91:69:d7:
|
||||
b1:5b
|
||||
Exponent: 65537 (0x10001)
|
||||
X509v3 extensions:
|
||||
X509v3 Basic Constraints:
|
||||
CA:TRUE
|
||||
Signature Algorithm: sha1WithRSAEncryption
|
||||
39:a0:8d:2f:68:22:1d:4f:3e:db:f1:9b:29:20:77:23:f8:21:
|
||||
34:17:84:00:88:a8:3e:a1:4d:84:94:90:96:02:e6:6a:b4:20:
|
||||
51:a0:66:20:38:05:18:aa:2a:3e:9a:50:60:af:eb:4a:70:ac:
|
||||
9b:59:30:d5:17:14:9c:b4:91:6a:1b:c3:45:8a:dd:cd:2f:c6:
|
||||
c5:8c:fe:d0:76:20:63:a4:97:db:e3:2a:8e:c1:3d:c8:b6:06:
|
||||
2d:49:7a:d9:8a:de:16:ea:5d:5f:fb:41:79:0d:8f:d2:23:00:
|
||||
d9:b9:6f:93:45:bb:74:17:ea:6b:72:13:01:86:fe:8d:7e:8f:
|
||||
27:71:76:a9:37:6d:6c:90:5a:3f:d9:6d:4d:6c:a4:64:7a:ea:
|
||||
82:c9:87:ee:6a:d0:6e:30:05:7f:19:1d:19:31:a9:9a:ce:21:
|
||||
84:da:47:c7:a0:66:12:e8:7e:57:69:5d:9c:24:e5:46:3c:bf:
|
||||
37:f6:88:c3:b1:42:de:3b:81:ed:f5:ae:e2:23:9e:c2:89:a1:
|
||||
e7:5c:1d:49:0f:ed:ae:55:60:0e:4e:4c:e9:8a:64:e6:ae:c5:
|
||||
d1:99:a7:70:4c:7e:5d:53:ac:88:2c:0f:0b:21:94:1a:32:f9:
|
||||
a1:cc:1e:67:98:6b:b6:e9:b1:b9:4b:46:02:b1:65:c9:49:83:
|
||||
80:bd:b9:70
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDWDCCAkCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO
|
||||
MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO
|
||||
MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy
|
||||
M1oXDTE4MDkyNTE5MjQyM1owbTELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15
|
||||
MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15
|
||||
MR4wHAYDVQQDDBVEdW1teSBJbnRlcm1lZGlhdGUgQ0EwggEiMA0GCSqGSIb3DQEB
|
||||
AQUAA4IBDwAwggEKAoIBAQDPfZIHpVYbb0zzNMISwjRiO2mqpgzGcFuTvNxBmGGH
|
||||
YTa+jAjdMakzdtNmPndgHu2e4eXvvxeRrAxjBwGrMGe8FqYvefBhjHktPJhgdGHE
|
||||
X2BEhXGSncx7FDl0qkT5n672x43DAUdTJKx7ovbFfWU3QAsgyNQUzfj0V+ojcPTj
|
||||
mSscmmc37ZPHp3yGkPeu/G9LGNzV6/NoM9Z4FNHKpwZ9dTT2wNQVGyEreNl2JKXw
|
||||
xhPIHkrIync0Tvj6SV9s4WaoZfCMvEQgA6yvSmGlOUhRG8vYIilgJ0dC/L9qd2VY
|
||||
CSCCHNEWXloY6plhjpOUJzAg3UQDUEO07KMP7pFp17FbAgMBAAGjEDAOMAwGA1Ud
|
||||
EwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADmgjS9oIh1PPtvxmykgdyP4ITQX
|
||||
hACIqD6hTYSUkJYC5mq0IFGgZiA4BRiqKj6aUGCv60pwrJtZMNUXFJy0kWobw0WK
|
||||
3c0vxsWM/tB2IGOkl9vjKo7BPci2Bi1JetmK3hbqXV/7QXkNj9IjANm5b5NFu3QX
|
||||
6mtyEwGG/o1+jydxdqk3bWyQWj/ZbU1spGR66oLJh+5q0G4wBX8ZHRkxqZrOIYTa
|
||||
R8egZhLofldpXZwk5UY8vzf2iMOxQt47ge31ruIjnsKJoedcHUkP7a5VYA5OTOmK
|
||||
ZOauxdGZp3BMfl1TrIgsDwshlBoy+aHMHmeYa7bpsblLRgKxZclJg4C9uXA=
|
||||
-----END CERTIFICATE-----
|
|
@ -1,9 +0,0 @@
|
|||
Certificate structure:
|
||||
|
||||
Root CA
|
||||
|
|
||||
|-> Intermediate CA
|
||||
|
|
||||
|-> Server
|
||||
|
|
||||
|-> Client
|
|
@ -1,20 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDRzCCAi8CAQIwDQYJKoZIhvcNAQEFBQAwbTELMAkGA1UEBhMCVVMxDjAMBgNV
|
||||
BAgMBUR1bW15MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNV
|
||||
BAsMBUR1bW15MR4wHAYDVQQDDBVEdW1teSBJbnRlcm1lZGlhdGUgQ0EwHhcNMTMx
|
||||
MDIxMTkyNDIzWhcNMTgwOTI1MTkyNDIzWjBmMQswCQYDVQQGEwJVUzEOMAwGA1UE
|
||||
CAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEOMAwGA1UE
|
||||
CwwFRHVtbXkxFzAVBgNVBAMMDkR1bW15IChjbGllbnQpMIIBIjANBgkqhkiG9w0B
|
||||
AQEFAAOCAQ8AMIIBCgKCAQEA4J/+eKqsjK0QS+cSDa5Fh4XM4THy812JkWMySA5r
|
||||
bFxHZ5ye36/IuwyRQ0Yn2DvhhsDR5K7yz8K+Yfp9A6WRBkyHK/sy/8vurQHeDH3y
|
||||
lLtHCLi5nfyt2fDxWOYwFrS1giGn2IxJIHBAWu4cBODCkqwqAp92+Lqp3Sn+D+Fb
|
||||
maHEU3LHua8OIJiSeAIHo/jPqfHFqZxK1bXhGCSQKvUZCaTftsqDtn+LZSElqj1y
|
||||
5/cnc7XGsTf8ml/+FDMX1aSAHf+pu+UAp9JqOXOM60A5JIpYu3Lsejp1RppyPJYP
|
||||
zC4nSN8R2LOdDChP2MB7f1/sXRGlLM/X3Vi4X+c6xQ85TQIDAQABMA0GCSqGSIb3
|
||||
DQEBBQUAA4IBAQAMWt9qMUOY5z1uyYcjUnconPHLM9MADCZI2sRbfdBOBHEnTVKv
|
||||
Y63SWnCt8TRJb01LKLIEys6pW1NUlxr6b+FwicNmycR0L8b63cmNXg2NmSZsnK9C
|
||||
fGT6BbbDdVPYjvmghpSd3soBGBLPsJvaFc6UL5tunm+hT7PxWjDxHZEiE18PTs05
|
||||
Vpp/ytILzhoXvJeFOWQHIdf4DLR5izGMNTKdQzgg1eBq2vKgjJIlEZ3j/AyHkJLE
|
||||
qFip1tyc0PRzgKYFLWttaZzakCLJOGuxtvYB+GrixVM7U23p5LQbLE0KX7fe2Gql
|
||||
xKMfSID5NUDNf1SuSrrGLD3gfnJEKVB8TVBk
|
||||
-----END CERTIFICATE-----
|
|
@ -1,27 +0,0 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpAIBAAKCAQEA4J/+eKqsjK0QS+cSDa5Fh4XM4THy812JkWMySA5rbFxHZ5ye
|
||||
36/IuwyRQ0Yn2DvhhsDR5K7yz8K+Yfp9A6WRBkyHK/sy/8vurQHeDH3ylLtHCLi5
|
||||
nfyt2fDxWOYwFrS1giGn2IxJIHBAWu4cBODCkqwqAp92+Lqp3Sn+D+FbmaHEU3LH
|
||||
ua8OIJiSeAIHo/jPqfHFqZxK1bXhGCSQKvUZCaTftsqDtn+LZSElqj1y5/cnc7XG
|
||||
sTf8ml/+FDMX1aSAHf+pu+UAp9JqOXOM60A5JIpYu3Lsejp1RppyPJYPzC4nSN8R
|
||||
2LOdDChP2MB7f1/sXRGlLM/X3Vi4X+c6xQ85TQIDAQABAoIBABosCiZdHIW3lHKD
|
||||
leLqL0e/G0QR4dDhUSoTeMRUiceyaM91vD0r6iOBL1u7TOEw+PIOfWY7zCbQ9gXM
|
||||
fcxy+hbVy9ogBq0vQbv+v7SM6DrUJ06o11fFHSyLmlNVXr0GiS+EZF4i2lJhQd5W
|
||||
aAVZetJEJRDxK5eHiEswnV2UUGvx6VCpFILL0JVGxWY7oOPxiiBLl+cmfRZdTfGx
|
||||
46VzQvBu7N8hGpCIsljuVFP/DxR7c+2oyrtFaFSMZBMNI8fICgkb2QeLk/XUBXtn
|
||||
0bDttgmOP/BvnNAor7nIRoeer/7kbXc9jOsgXwnvDKPapltQddL+exycXzbIjLuY
|
||||
Z2SFsDECgYEA+2A4QGV0biqdICAoKCHCHCU/CrdDUQiQDHqRU6/nhka7MFPSl4Wy
|
||||
9oISRrYZhKIbSbaXwTW5ZcYq8Hpn/yGYIWlINP9sjprnOWPE7L74lac+PFWXNMUI
|
||||
jNJOJkLK1IeppByXAt5ekGBrG556bhzRCJsTjYsyUR/r/bMEF1FD8WMCgYEA5MHM
|
||||
hqmkDK5CbklVaPonNc251Lx+HSzzQ40WExC/PrCczRaZMKlhmyKZfWJCInQsUDln
|
||||
w6Lqa5UnwZV2HYAF30VZYQsq84ulNnx1/36BEZyIimfAL1WHvKeGWjGsZqniXxxb
|
||||
Os5wEMAvxk0SWVrR5v6YpBDv3t9+lLg/bzBOAY8CgYEAuZ0q7CH9/vroWrhj7n4+
|
||||
3pmCG1+HDWbNNumqNalFxBimT+EVN1058FvLMvtzjERG8f8pvzj0VPom6rr336Pm
|
||||
uYUMFFYmyoYHBpFs74Nz+s0rX1Gz/PsgfRstKYNYUeZ6lPunZi7clK8dZ591t6j/
|
||||
kOMxZOrLlKuFjieJdc5D5RECgYAVTzxXOwxOJhmIHoq3Sb5HU8/A0oJJA3vxyf3J
|
||||
buDx3Q/uRvGkR9MQ2YtE09dnUD0kiARzhASkWvOmI98p5lglsVcfJCQvJc4RIkz3
|
||||
rPgnBNbvVbTgc+4+E7j/Q+tUcPTmeUTCWKK13MFWjq1r53rwMr1TY0SFFXq8LeGy
|
||||
4OQTXwKBgQDCuPN3Q+EJusYy7TXt0WicY/xyu15s1216N7PmRKFr/WAn2JdAfjbD
|
||||
JKDwVqo0AQiEDAobJk0JMPs+ENK2d58GsybCK4QGAh6z5FGunb5T432YfnoXtL3J
|
||||
ZKVvkf7eowvokTIeiDf3XrCPajLDBpo88Xax+RH03US7XRdu/fVzMA==
|
||||
-----END RSA PRIVATE KEY-----
|
|
@ -1,20 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDWDCCAkCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO
|
||||
MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO
|
||||
MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy
|
||||
M1oXDTE4MDkyNTE5MjQyM1owbTELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15
|
||||
MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15
|
||||
MR4wHAYDVQQDDBVEdW1teSBJbnRlcm1lZGlhdGUgQ0EwggEiMA0GCSqGSIb3DQEB
|
||||
AQUAA4IBDwAwggEKAoIBAQDPfZIHpVYbb0zzNMISwjRiO2mqpgzGcFuTvNxBmGGH
|
||||
YTa+jAjdMakzdtNmPndgHu2e4eXvvxeRrAxjBwGrMGe8FqYvefBhjHktPJhgdGHE
|
||||
X2BEhXGSncx7FDl0qkT5n672x43DAUdTJKx7ovbFfWU3QAsgyNQUzfj0V+ojcPTj
|
||||
mSscmmc37ZPHp3yGkPeu/G9LGNzV6/NoM9Z4FNHKpwZ9dTT2wNQVGyEreNl2JKXw
|
||||
xhPIHkrIync0Tvj6SV9s4WaoZfCMvEQgA6yvSmGlOUhRG8vYIilgJ0dC/L9qd2VY
|
||||
CSCCHNEWXloY6plhjpOUJzAg3UQDUEO07KMP7pFp17FbAgMBAAGjEDAOMAwGA1Ud
|
||||
EwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADmgjS9oIh1PPtvxmykgdyP4ITQX
|
||||
hACIqD6hTYSUkJYC5mq0IFGgZiA4BRiqKj6aUGCv60pwrJtZMNUXFJy0kWobw0WK
|
||||
3c0vxsWM/tB2IGOkl9vjKo7BPci2Bi1JetmK3hbqXV/7QXkNj9IjANm5b5NFu3QX
|
||||
6mtyEwGG/o1+jydxdqk3bWyQWj/ZbU1spGR66oLJh+5q0G4wBX8ZHRkxqZrOIYTa
|
||||
R8egZhLofldpXZwk5UY8vzf2iMOxQt47ge31ruIjnsKJoedcHUkP7a5VYA5OTOmK
|
||||
ZOauxdGZp3BMfl1TrIgsDwshlBoy+aHMHmeYa7bpsblLRgKxZclJg4C9uXA=
|
||||
-----END CERTIFICATE-----
|
|
@ -1,27 +0,0 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEogIBAAKCAQEAz32SB6VWG29M8zTCEsI0YjtpqqYMxnBbk7zcQZhhh2E2vowI
|
||||
3TGpM3bTZj53YB7tnuHl778XkawMYwcBqzBnvBamL3nwYYx5LTyYYHRhxF9gRIVx
|
||||
kp3MexQ5dKpE+Z+u9seNwwFHUySse6L2xX1lN0ALIMjUFM349FfqI3D045krHJpn
|
||||
N+2Tx6d8hpD3rvxvSxjc1evzaDPWeBTRyqcGfXU09sDUFRshK3jZdiSl8MYTyB5K
|
||||
yMp3NE74+klfbOFmqGXwjLxEIAOsr0phpTlIURvL2CIpYCdHQvy/andlWAkgghzR
|
||||
Fl5aGOqZYY6TlCcwIN1EA1BDtOyjD+6RadexWwIDAQABAoIBAEs6OsS85DBENUEE
|
||||
QszsTnPDGLd/Rqh3uiwhUDYUGmAsFd4WBWy1AaSgE1tBkKRv8jUlr+kxfkkZeNA6
|
||||
jRdVEHc4Ov6Blm63sIN/Mbve1keNUOjm/NtsjOOe3In45dMfWx8sELC/+O0jIcod
|
||||
tpy5rwXOGXrEdWgpmXZ1nXVGEfOmQH3eGEPkqbY1I4YlAoXD0mc5fNQQrn7qrogH
|
||||
M5USCnC44yIIF0Yube2Fg0Cem41vzIvENAlZC273gyW+pQwez0uma2LaCWmkEz1N
|
||||
sESrNSQ4yeQnDQYlgX2w3RRpqql4GDzAdISL2WJcNhW6KJ72B0SQ1ny/TmQgZePG
|
||||
Ojv1T0ECgYEA9CXqKyXBSPF+Wdc/fNagrIi6tcNkLAN2/p5J3Z6TtbZGjItoMlDX
|
||||
c+hwHobcI3GZLMlxlBx7ePc7cKgaMDXrl8BZZjFoyEV9OHOLicfNkLFmBIZ14gtX
|
||||
bGZYDuCcal46r7IKRjT8lcYWCoLJnI9vLEII7Q7P/eBgcntw3+h/ziECgYEA2ZAa
|
||||
bp9d0xBaOXq/E341guxNG49R09/DeZ/2CEM+V1pMD8OVH9cvxrBdDLUmAnrqeGTh
|
||||
Djoi1UEbOVAV6/dXbTQHrla+HF4Uq+t9tV+mt68TEa54PQ/ERt5ih3nZGBiqZ6rX
|
||||
SGeyZmIXMLIZEs2dIbJ2DmLcZj6Tjxkd/PxPt/sCgYBGczZaEv/uK3k5NWplfI1K
|
||||
m/28e1BJfwp0OHq6D4sx8RH0djmv4zH4iUbpGCMnuxznFo3Gnl1mr3igbnF4HecI
|
||||
mAF0AqfoulyC0JygOl5v9TCp957Ghl1Is1OPn3KjIuOuVSKv1ZRZJ5qul8TTf3Qm
|
||||
AjwPI6oS6Q8LmeEdSzqt4QKBgB5MglHboe5t/ZK5tHibgApOrGJlMEkohYmfrFz0
|
||||
OG9j5OnhHBiGGGI8V4kYhUWdJqBDtFAN6qH2Yjs2Gwd0t9k+gL9X1zwOIiTbM/OZ
|
||||
cZdtK2Ov/5DJbFVOTTx+zKwda0Xqtfagcmjtyjr+4p0Kw5JYzzYrsHQQzO4F2nZM
|
||||
ETIXAoGADskTzhgpPrC5/qfuLY4gBUtCfYIb8kaKN90AT8A/14lBrT4lSnmsEvKP
|
||||
tRDmFjnc/ogDlHa5SRDijtT6UoyQPuauAt6DYrJ8G6qKJqiMwJcuLV1XFks7z1J8
|
||||
VzB8kso1pPAtcvVXBPklsjvZ10NdQOCqm4N3EVp69agbB1oco4I=
|
||||
-----END RSA PRIVATE KEY-----
|
|
@ -1,18 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIC8DCCAlmgAwIBAgIJAOD63PlXjJi8MA0GCSqGSIb3DQEBBQUAMIGQMQswCQYD
|
||||
VQQGEwJHQjEXMBUGA1UECAwOVW5pdGVkIEtpbmdkb20xDjAMBgNVBAcMBURlcmJ5
|
||||
MRIwEAYDVQQKDAlNb3NxdWl0dG8xCzAJBgNVBAsMAkNBMRYwFAYDVQQDDA1tb3Nx
|
||||
dWl0dG8ub3JnMR8wHQYJKoZIhvcNAQkBFhByb2dlckBhdGNob28ub3JnMB4XDTEy
|
||||
MDYyOTIyMTE1OVoXDTIyMDYyNzIyMTE1OVowgZAxCzAJBgNVBAYTAkdCMRcwFQYD
|
||||
VQQIDA5Vbml0ZWQgS2luZ2RvbTEOMAwGA1UEBwwFRGVyYnkxEjAQBgNVBAoMCU1v
|
||||
c3F1aXR0bzELMAkGA1UECwwCQ0ExFjAUBgNVBAMMDW1vc3F1aXR0by5vcmcxHzAd
|
||||
BgkqhkiG9w0BCQEWEHJvZ2VyQGF0Y2hvby5vcmcwgZ8wDQYJKoZIhvcNAQEBBQAD
|
||||
gY0AMIGJAoGBAMYkLmX7SqOT/jJCZoQ1NWdCrr/pq47m3xxyXcI+FLEmwbE3R9vM
|
||||
rE6sRbP2S89pfrCt7iuITXPKycpUcIU0mtcT1OqxGBV2lb6RaOT2gC5pxyGaFJ+h
|
||||
A+GIbdYKO3JprPxSBoRponZJvDGEZuM3N7p3S/lRoi7G5wG5mvUmaE5RAgMBAAGj
|
||||
UDBOMB0GA1UdDgQWBBTad2QneVztIPQzRRGj6ZHKqJTv5jAfBgNVHSMEGDAWgBTa
|
||||
d2QneVztIPQzRRGj6ZHKqJTv5jAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUA
|
||||
A4GBAAqw1rK4NlRUCUBLhEFUQasjP7xfFqlVbE2cRy0Rs4o3KS0JwzQVBwG85xge
|
||||
REyPOFdGdhBY2P1FNRy0MDr6xr+D2ZOwxs63dG1nnAnWZg7qwoLgpZ4fESPD3PkA
|
||||
1ZgKJc2zbSQ9fCPxt2W3mdVav66c6fsb7els2W2Iz7gERJSX
|
||||
-----END CERTIFICATE-----
|
|
@ -1,21 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDizCCAnOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJVUzEO
|
||||
MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO
|
||||
MAwGA1UECwwFRHVtbXkxETAPBgNVBAMMCER1bW15IENBMB4XDTEzMTAyMTE5MjQy
|
||||
M1oXDTE4MDkyNTE5MjQyM1owYDELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBUR1bW15
|
||||
MQ4wDAYDVQQHDAVEdW1teTEOMAwGA1UECgwFRHVtbXkxDjAMBgNVBAsMBUR1bW15
|
||||
MREwDwYDVQQDDAhEdW1teSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
|
||||
ggEBAMLR0DHck8OtiA34k/7MqgQdharDu72HBPBCZxQ0SlaUK7/Qa3IwODk1IIzj
|
||||
fmWCsH4+HfEYgrfWGVlD7YG+61FE/HeeN63hoBi5S1l5kIGk5FIv/OL/mBBe1ROa
|
||||
FmIa4MurHa7a0UDUl7Hm4/GXLCpSc6vQohXzHpqwZ9BiZ0t0sLuP754yakwnToJ8
|
||||
FmbOBumj2TZP9D68gACTwcoxzwNo1OWLOEW2GzWwwOlKYnWDAaq5wQvA7pfAcyPN
|
||||
NOy7PJU1yC1p/4bYH8gEfhjeYsJLN8aqjgO/Kw2XICp1R+yYKTxkUu+Ri2MPavjC
|
||||
nQhqYWhvZJpWsgq8e1k9f/26EksCAwEAAaNQME4wHQYDVR0OBBYEFFu7Po4tkK2u
|
||||
WAf/UwAYmP9EhEy6MB8GA1UdIwQYMBaAFFu7Po4tkK2uWAf/UwAYmP9EhEy6MAwG
|
||||
A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADyJC71JEKYa9ipLXwI97vMZ
|
||||
T8kQeZwB74giPQNbGhRGtn+br6WZGtTUm9Zvwf6Wj5qeR0K07iFWasSSOGyBzY4x
|
||||
Q4Z8lxWQgNgh8Ea+Ki/ylgeFdKj6G3iPgMFevNkGwjOejvkI3UN7b1oiZ0Z4XftK
|
||||
TsLGKZQXU6bFqdZnBk8H79pbRSGDyzGy3NysExmYP5hfLLS02tRD16kabrbPvoWo
|
||||
gB+KwZWKg6Sv0iNKthiHTigxNgMsv+SetnX9xGjtTdWo+qWBExccQ2cCHNDmAG6L
|
||||
E+ZgH7pAeJMlyllaccxY1FJjHbM8zjfxiXj8E/qz6iKvF2iKoVlX9RpJbrn2X7M=
|
||||
-----END CERTIFICATE-----
|
|
@ -1,27 +0,0 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEAwtHQMdyTw62IDfiT/syqBB2FqsO7vYcE8EJnFDRKVpQrv9Br
|
||||
cjA4OTUgjON+ZYKwfj4d8RiCt9YZWUPtgb7rUUT8d543reGgGLlLWXmQgaTkUi/8
|
||||
4v+YEF7VE5oWYhrgy6sdrtrRQNSXsebj8ZcsKlJzq9CiFfMemrBn0GJnS3Swu4/v
|
||||
njJqTCdOgnwWZs4G6aPZNk/0PryAAJPByjHPA2jU5Ys4RbYbNbDA6UpidYMBqrnB
|
||||
C8Dul8BzI8007Ls8lTXILWn/htgfyAR+GN5iwks3xqqOA78rDZcgKnVH7JgpPGRS
|
||||
75GLYw9q+MKdCGphaG9kmlayCrx7WT1//boSSwIDAQABAoIBAGphOzge5Cjzdtl6
|
||||
JQX7J9M7c6O9YaSqN44iFDs6GmWQXxtMaX9eyTSjx/RmvLwdUtZ8gMkHw0kzBYBy
|
||||
0RwJ7mDgNKP0px6xl0Qo2fYvpTLFoU8nmQUy4AwAXIVpnFNRrfJIq9qw7ZZi/7pL
|
||||
A6kGDT3G7Bajw/4MVWfOb8GgGhte1ZhZgXFEZNjGkhwi3Na1/6slOQIfnkkhco0X
|
||||
ru1Cw82nXNPHqu6K+pbHP9ucYdUNZWRh+yQS3p92lr5tB3/IL/lD0Cl3+xP8JFl+
|
||||
5NMSISOKGb3ld0rzrJd1ncgLgv/XlHu8DqvcFs9QwXbaUlG0U/0GrorGYqFaZYaH
|
||||
R1rkZjECgYEA9mAarVAeL7IOeEIg28f/qyp//5+pMzRpVhnI+xscHB5QUO9WH+uE
|
||||
nOXwcGvcRME134H4o/0j75aMhVs7sGfMOQ+enAwOxRC5h4MCClDSWysWftU8Ihhf
|
||||
Sm6eZ0kYLZNqXt/TxTs124NiF1Bb5pekzEr9fTj//vP4meuAQ/D0JoUCgYEAym4f
|
||||
BCm5tLwYYxZM4tko0g9BHxy4aAPfyshuLed1JjkK4JCFp368GBoknj5rUNewTun2
|
||||
1zkQF9b5Mi3k5qWkboP5rpp7DuG3PJdWypV6b/btUeqcyG1gteQwTAwebfqeM0vH
|
||||
QvpuAoRMtEcSBQBl2s9zgmObXUpDlLwuIlL+to8CgYEAyJBtxx8Mo9k4jE+Q/jnu
|
||||
+QFtF8R68jM9eRkeksR7+qv2yBw+KVgKKcvKE0rLErGS0LO2nJELexQ8qqcdjTrC
|
||||
dsUvYmsybtxxnE5bD9jBlfQaqP+fp0Xd9PLeQsivRRLXqgpeFBZifqOS69XAKpTS
|
||||
VHjLqPAI/hzQCUU8spJpvx0CgYAePgt2NMGgxcUi8I72CRl3IH5LJqBKMeH6Sq1j
|
||||
QEQZPMZqPE0rc9yoASfdWFfyEPcvIvcUulq0JRK/s2mSJ8cEF8Vyl3OxCnm0nKuD
|
||||
woczOQHFjjZ0HxsmsXuhsOHO7nU6FqUjVYSf7aIEAOYpRyDwarPIFBd+/XxROTfv
|
||||
OtUA8wKBgAOiGXRxycb4rAtJBDqPAgdAAwNgvQHyVgn32ArWtgu8ermuZW5h1y45
|
||||
hULFvCbLSCpo+I7QhRhw4y2DoB1DgIw04BeFUIcE+az7HH3euAyCLQ0caaA8Xk/6
|
||||
bpPfUMe1SNi51f345QlOPvvwGllTC6DeBhZ730k7VNB32dOCV3kE
|
||||
-----END RSA PRIVATE KEY-----
|
|
@ -1,21 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDYTCCAkmgAwIBAgIBATANBgkqhkiG9w0BAQUFADBtMQswCQYDVQQGEwJVUzEO
|
||||
MAwGA1UECAwFRHVtbXkxDjAMBgNVBAcMBUR1bW15MQ4wDAYDVQQKDAVEdW1teTEO
|
||||
MAwGA1UECwwFRHVtbXkxHjAcBgNVBAMMFUR1bW15IEludGVybWVkaWF0ZSBDQTAe
|
||||
Fw0xMzEwMjExOTI0MjNaFw0xODA5MjUxOTI0MjNaMGYxCzAJBgNVBAYTAlVTMQ4w
|
||||
DAYDVQQIDAVEdW1teTEOMAwGA1UEBwwFRHVtbXkxDjAMBgNVBAoMBUR1bW15MQ4w
|
||||
DAYDVQQLDAVEdW1teTEXMBUGA1UEAwwORHVtbXkgKHNlcnZlcikwggEiMA0GCSqG
|
||||
SIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0fQCRUWXt+i7JMR55Zuo6wBRxG7RnPutN
|
||||
2L7J/18io52vxjm8AZDiC0JFkCHh72ZzvbgVA+e+WxAIYfioRis4JWw4jK8v5m8q
|
||||
cZzS0GJNTMROPiZQi7A81tAbrV00XN7d5PsmIJ2Bf4XbJWMy31CsmoFloeRMd7bR
|
||||
LxwDIb0qqRawhKsWdfZB/c9wGKmHlei50B7PXk+koKnVdsLwXxtCZDvc/3fNRHEK
|
||||
lZs4m0N05G38FdrnczPm/0pie87nK9rnklL7u1sYOukOznnOtW5h7+A4M+DxzME0
|
||||
HRU6k4d+6QvukxBlsE93gHhwRsejIuDGlqD+DRxk2PdmmgsmPH59AgMBAAGjEzAR
|
||||
MA8GA1UdEQQIMAaHBAoKBOQwDQYJKoZIhvcNAQEFBQADggEBAJ3bKs2b4cAJWTZj
|
||||
69dMEfYZKcQIXs7euwtKlP7H8m5c+X5KmZPi1Puq4Z0gtvLu/z7J9UjZjG0CoylV
|
||||
q15Zp5svryJ7XzcsZs7rwyo1JtngW1z54wr9MezqIOF2w12dTwEAINFsW7TxAsH7
|
||||
bfqkzZjuCbbsww5q4eHuZp0yaMHc3hOGaUot27OTlxlIMhv7VBBqWAj0jmvAfTKf
|
||||
la0SiL/Mc8rD8D5C0SXGcCL6li/kqtinAxzhokuyyPf+hQX35kcZxEPu6WxtYVLv
|
||||
hMzrokOZP2FrGbCnhaNT8gw4Aa0RXV1JgonRWYSbkeaCzvr2bJ0OuJiDdwdRKvOo
|
||||
raKLlfY=
|
||||
-----END CERTIFICATE-----
|
|
@ -1,27 +0,0 @@
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEAtH0AkVFl7fouyTEeeWbqOsAUcRu0Zz7rTdi+yf9fIqOdr8Y5
|
||||
vAGQ4gtCRZAh4e9mc724FQPnvlsQCGH4qEYrOCVsOIyvL+ZvKnGc0tBiTUzETj4m
|
||||
UIuwPNbQG61dNFze3eT7JiCdgX+F2yVjMt9QrJqBZaHkTHe20S8cAyG9KqkWsISr
|
||||
FnX2Qf3PcBiph5XoudAez15PpKCp1XbC8F8bQmQ73P93zURxCpWbOJtDdORt/BXa
|
||||
53Mz5v9KYnvO5yva55JS+7tbGDrpDs55zrVuYe/gODPg8czBNB0VOpOHfukL7pMQ
|
||||
ZbBPd4B4cEbHoyLgxpag/g0cZNj3ZpoLJjx+fQIDAQABAoIBAG0UfxtUTn4dDdma
|
||||
TgihIj6Ph8s0Kzua0yshK215YU3WBJ8O9iWh7KYwl8Ti7xdVUF3y8yYATjbFYlMu
|
||||
otFQVx5/v4ANxnL0mYrVTyo5tq9xDdMbzJwxUDn0uaGAjSvwVOFWWlMYsxhoscVY
|
||||
OzOrs14dosaBqTBtyZdzGULrSSBWPCBlucRcvTV/eZwgYrYJ3bG66ZTfdc930KPj
|
||||
nfkWrsAWmPz8irHoWQ2OX+ZJTprVYRYIZXqpFn3zuwmhpJkZUVULMMk6LFBKDmBT
|
||||
F2+b4h49P+oNJ+6CRoOERHYq2k1MmYBcu1z8lMjdfRGUDdK4vS9pcqhBXJJg1vU9
|
||||
APRtfiECgYEA6Y3LqQJLkUI0w6g/9T+XyzUoi0aUfH6PT81XnGYqJxTBHinZvgML
|
||||
mF3qtZ0bHGwEoAsyhSgDkeCawE/E7Phd+B6aku2QMVm8GHygZg0Pbao4cxXv+CF3
|
||||
i1Lo7n3zY0kTVrjsvDRsDDESmRK4Ea48fJwOfUEtfG6VDtwmZAe8chcCgYEAxdWd
|
||||
sWcc45ARi2vY6yb5Ysgt/g0z26KyQydF+GMWIz1FDfUxXJ/axdCovd3VIHDvItJE
|
||||
n9LjFiobkyOKX99ou1foWwsmhn11duVrF7hsVrE0nsbd4RX3sTbqXa9x3GN/ujFr
|
||||
0xHUTmiXt3Qyn/076jBiLGnbtzSxJ/IZIEI9VIsCgYEAketHnTaT5BOLR9ss6ptq
|
||||
yUlTJYFZcFbaTy+qV0r1dyleZuwa4L6iVfYHmKSptZ4/XYbhb5RKdq/vv8uW679Z
|
||||
ZpYoWTgX6N15yYrD5D6wrwG09yJzpYGzYNbSNX93u0aC0KIFNqlCAHQAfKbXXiSQ
|
||||
IgKWgudf9ehZNMmTKtgygs0CgYAoTV9Fr7Lj7QqV84+KQDNX2137PmdNHDTil1Ka
|
||||
ylzNKwMxV70JmIsx91MY8uMjK76bwmg2gvi+IC/j5r6ez11/pOXx/jCH/3D5mr0Z
|
||||
ZPm1I36LxgmXfCkskfpmwYIZmq9/l+fWZPByVL5roiFaFHWrPNYTJDGdff+FGr3h
|
||||
o3zpBwKBgDY1sih/nY+6rwOP+DcabGK9KFFKLXsoJrXobEniLxp7oFaGN2GkmKvN
|
||||
NajCs5pr3wfb4LrVrsNvERnUsUXWg6ReLqfWbT4bmjzE2iJ3IbtVQ5M4kl6YrbdZ
|
||||
PMgWoLCqnoo8NoGBtmVMWhaXNJvVZPgZHk33T5F0Cg6PKNdHDchH
|
||||
-----END RSA PRIVATE KEY-----
|
|
@ -1,51 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
)
|
||||
|
||||
var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
|
||||
fmt.Printf("TOPIC: %s\n", msg.Topic())
|
||||
fmt.Printf("MSG: %s\n", msg.Payload())
|
||||
}
|
||||
|
||||
var onConnect MQTT.OnConnectHandler = func(client *MQTT.Client) {
|
||||
fmt.Println("onConnect")
|
||||
if token := client.Subscribe("shirou@github/#", 0, nil); token.Wait() && token.Error() != nil {
|
||||
fmt.Println(token.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
var subscribed = "#"
|
||||
|
||||
func main() {
|
||||
opts := MQTT.NewClientOptions().AddBroker("tcp://lite.mqtt.shiguredo.jp:1883")
|
||||
opts.SetDefaultPublishHandler(f)
|
||||
opts.SetOnConnectHandler(onConnect)
|
||||
opts.SetCleanSession(true)
|
||||
|
||||
opts.SetUsername("shirou@github")
|
||||
opts.SetPassword("8Ub6F68kfYlr7RoV")
|
||||
|
||||
c := MQTT.NewClient(opts)
|
||||
if token := c.Connect(); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
}
|
||||
|
||||
qos := 0
|
||||
retain := false
|
||||
payload := "sanple"
|
||||
topic := "shirou@github/log"
|
||||
token := c.Publish(topic, byte(qos), retain, payload)
|
||||
// token.Wait()
|
||||
fmt.Println("%v", token.Error())
|
||||
|
||||
for {
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
|
@ -1,35 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
)
|
||||
|
||||
var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
|
||||
fmt.Printf("TOPIC: %s\n", msg.Topic())
|
||||
fmt.Printf("MSG: %s\n", msg.Payload())
|
||||
}
|
||||
|
||||
var subscribed = "#"
|
||||
|
||||
func main() {
|
||||
opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883")
|
||||
opts.SetDefaultPublishHandler(f)
|
||||
opts.SetCleanSession(true)
|
||||
|
||||
c := MQTT.NewClient(opts)
|
||||
if token := c.Connect(); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
}
|
||||
|
||||
if token := c.Subscribe("#", 0, nil); token.Wait() && token.Error() != nil {
|
||||
fmt.Println(token.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
for {
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
|
@ -1,42 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
)
|
||||
|
||||
var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
|
||||
fmt.Printf("TOPIC: %s\n", msg.Topic())
|
||||
fmt.Printf("MSG: %s\n", msg.Payload())
|
||||
}
|
||||
|
||||
var onConnect MQTT.OnConnectHandler = func(client *MQTT.Client) {
|
||||
fmt.Println("onConnect")
|
||||
if token := client.Subscribe("#", 0, nil); token.Wait() && token.Error() != nil {
|
||||
fmt.Println(token.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
var subscribed = "#"
|
||||
|
||||
func main() {
|
||||
// opts := MQTT.NewClientOptions().AddBroker("tcp://localhost:1883")
|
||||
opts := MQTT.NewClientOptions().AddBroker("tcp://lite.mqtt.shiguredo.jp:1883")
|
||||
opts.SetDefaultPublishHandler(f)
|
||||
opts.SetOnConnectHandler(onConnect)
|
||||
opts.SetCleanSession(true)
|
||||
|
||||
opts.SetUsername("shirou@github.com")
|
||||
c := MQTT.NewClient(opts)
|
||||
if token := c.Connect(); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
}
|
||||
|
||||
for {
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
|
@ -1,130 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
)
|
||||
|
||||
/*
|
||||
Options:
|
||||
[-help] Display help
|
||||
[-action pub|sub] Action pub (publish) or sub (subscribe)
|
||||
[-message <message>] Payload to send
|
||||
[-num <number>] Number of messages to send or receive
|
||||
[-qos 0|1|2] Quality of Service
|
||||
[-clean] CleanSession (true if -clean is present)
|
||||
[-id <clientid>] ClientID
|
||||
[-user <user>] User
|
||||
[-password <password>] Password
|
||||
[-broker <uri>] Broker URI
|
||||
[-topic <topic>] Topic
|
||||
[-store <path>] Store Directory
|
||||
|
||||
*/
|
||||
|
||||
func main() {
|
||||
topic := flag.String("topic", "", "The topic name to/from which to publish/subscribe")
|
||||
broker := flag.String("broker", "tcp://iot.eclipse.org:1883", "The broker URI. ex: tcp://10.10.1.1:1883")
|
||||
password := flag.String("password", "", "The password (optional)")
|
||||
user := flag.String("user", "", "The User (optional)")
|
||||
id := flag.String("id", "testgoid", "The ClientID (optional)")
|
||||
cleansess := flag.Bool("clean", false, "Set Clean Session (default false)")
|
||||
qos := flag.Int("qos", 0, "The Quality of Service 0,1,2 (default 0)")
|
||||
num := flag.Int("num", 1, "The number of messages to publish or subscribe (default 1)")
|
||||
payload := flag.String("message", "", "The message text to publish (default empty)")
|
||||
action := flag.String("action", "", "Action publish or subscribe (required)")
|
||||
store := flag.String("store", ":memory:", "The Store Directory (default use memory store)")
|
||||
flag.Parse()
|
||||
|
||||
if *action != "pub" && *action != "sub" {
|
||||
fmt.Println("Invalid setting for -action, must be pub or sub")
|
||||
return
|
||||
}
|
||||
|
||||
if *topic == "" {
|
||||
fmt.Println("Invalid setting for -topic, must not be empty")
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Sample Info:\n")
|
||||
fmt.Printf("\taction: %s\n", *action)
|
||||
fmt.Printf("\tbroker: %s\n", *broker)
|
||||
fmt.Printf("\tclientid: %s\n", *id)
|
||||
fmt.Printf("\tuser: %s\n", *user)
|
||||
fmt.Printf("\tpassword: %s\n", *password)
|
||||
fmt.Printf("\ttopic: %s\n", *topic)
|
||||
fmt.Printf("\tmessage: %s\n", *payload)
|
||||
fmt.Printf("\tqos: %d\n", *qos)
|
||||
fmt.Printf("\tcleansess: %v\n", *cleansess)
|
||||
fmt.Printf("\tnum: %d\n", *num)
|
||||
fmt.Printf("\tstore: %s\n", *store)
|
||||
|
||||
opts := MQTT.NewClientOptions()
|
||||
opts.AddBroker(*broker)
|
||||
opts.SetClientID(*id)
|
||||
opts.SetUsername(*user)
|
||||
opts.SetPassword(*password)
|
||||
opts.SetCleanSession(*cleansess)
|
||||
if *store != ":memory:" {
|
||||
opts.SetStore(MQTT.NewFileStore(*store))
|
||||
}
|
||||
|
||||
if *action == "pub" {
|
||||
client := MQTT.NewClient(opts)
|
||||
if token := client.Connect(); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
}
|
||||
fmt.Println("Sample Publisher Started")
|
||||
for i := 0; i < *num; i++ {
|
||||
fmt.Println("---- doing publish ----")
|
||||
token := client.Publish(*topic, byte(*qos), false, *payload)
|
||||
token.Wait()
|
||||
}
|
||||
|
||||
client.Disconnect(250)
|
||||
fmt.Println("Sample Publisher Disconnected")
|
||||
} else {
|
||||
receiveCount := 0
|
||||
choke := make(chan [2]string)
|
||||
|
||||
opts.SetDefaultPublishHandler(func(client *MQTT.Client, msg MQTT.Message) {
|
||||
choke <- [2]string{msg.Topic(), string(msg.Payload())}
|
||||
})
|
||||
|
||||
client := MQTT.NewClient(opts)
|
||||
if token := client.Connect(); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
}
|
||||
|
||||
if token := client.Subscribe(*topic, byte(*qos), nil); token.Wait() && token.Error() != nil {
|
||||
fmt.Println(token.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for receiveCount < *num {
|
||||
incoming := <-choke
|
||||
fmt.Printf("RECEIVED TOPIC: %s MESSAGE: %s\n", incoming[0], incoming[1])
|
||||
receiveCount++
|
||||
}
|
||||
|
||||
client.Disconnect(250)
|
||||
fmt.Println("Sample Subscriber Disconnected")
|
||||
}
|
||||
}
|
|
@ -1,58 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
)
|
||||
|
||||
var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
|
||||
fmt.Printf("TOPIC: %s\n", msg.Topic())
|
||||
fmt.Printf("MSG: %s\n", msg.Payload())
|
||||
}
|
||||
|
||||
func main() {
|
||||
opts := MQTT.NewClientOptions().AddBroker("tcp://iot.eclipse.org:1883").SetClientID("gotrivial")
|
||||
opts.SetDefaultPublishHandler(f)
|
||||
|
||||
c := MQTT.NewClient(opts)
|
||||
if token := c.Connect(); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
}
|
||||
|
||||
if token := c.Subscribe("/go-mqtt/sample", 0, nil); token.Wait() && token.Error() != nil {
|
||||
fmt.Println(token.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
text := fmt.Sprintf("this is msg #%d!", i)
|
||||
token := c.Publish("/go-mqtt/sample", 0, false, text)
|
||||
token.Wait()
|
||||
}
|
||||
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
if token := c.Unsubscribe("/go-mqtt/sample"); token.Wait() && token.Error() != nil {
|
||||
fmt.Println(token.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
c.Disconnect(250)
|
||||
}
|
|
@ -1,123 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
/*
|
||||
To run this sample, the following certificates
|
||||
must be created:
|
||||
|
||||
rootCA-crt.pem - root certificate authority that is used
|
||||
to sign and verify the client and server
|
||||
certificates.
|
||||
rootCA-key.pem - keyfile for the rootCA.
|
||||
|
||||
server-crt.pem - server certificate signed by the CA.
|
||||
server-key.pem - keyfile for the server certificate.
|
||||
|
||||
client-crt.pem - client certificate signed by the CA.
|
||||
client-key.pem - keyfile for the client certificate.
|
||||
|
||||
CAfile.pem - file containing concatenated CA certificates
|
||||
if there is more than 1 in the chain.
|
||||
(e.g. root CA -> intermediate CA -> server cert)
|
||||
|
||||
Instead of creating CAfile.pem, rootCA-crt.pem can be added
|
||||
to the default openssl CA certificate bundle. To find the
|
||||
default CA bundle used, check:
|
||||
$GOROOT/src/pkg/crypto/x509/root_unix.go
|
||||
To use this CA bundle, just set tls.Config.RootCAs = nil.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import "io/ioutil"
|
||||
import "fmt"
|
||||
import "time"
|
||||
import "crypto/tls"
|
||||
import "crypto/x509"
|
||||
import MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
|
||||
func NewTLSConfig() *tls.Config {
|
||||
// Import trusted certificates from CAfile.pem.
|
||||
// Alternatively, manually add CA certificates to
|
||||
// default openssl CA bundle.
|
||||
certpool := x509.NewCertPool()
|
||||
pemCerts, err := ioutil.ReadFile("samplecerts/CAfile.pem")
|
||||
if err == nil {
|
||||
certpool.AppendCertsFromPEM(pemCerts)
|
||||
}
|
||||
|
||||
// Import client certificate/key pair
|
||||
cert, err := tls.LoadX509KeyPair("samplecerts/client-crt.pem", "samplecerts/client-key.pem")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Just to print out the client certificate..
|
||||
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Println(cert.Leaf)
|
||||
|
||||
// Create tls.Config with desired tls properties
|
||||
return &tls.Config{
|
||||
// RootCAs = certs used to verify server cert.
|
||||
RootCAs: certpool,
|
||||
// ClientAuth = whether to request cert from server.
|
||||
// Since the server is set up for SSL, this happens
|
||||
// anyways.
|
||||
ClientAuth: tls.NoClientCert,
|
||||
// ClientCAs = certs used to validate client cert.
|
||||
ClientCAs: nil,
|
||||
// InsecureSkipVerify = skip verifying that the cert contents
|
||||
// match the server (hostname, IP, etc.); set true here for this sample only.
|
||||
InsecureSkipVerify: true,
|
||||
// Certificates = list of certs client sends to server.
|
||||
Certificates: []tls.Certificate{cert},
|
||||
}
|
||||
}
|
||||
|
||||
var f MQTT.MessageHandler = func(client *MQTT.Client, msg MQTT.Message) {
|
||||
fmt.Printf("TOPIC: %s\n", msg.Topic())
|
||||
fmt.Printf("MSG: %s\n", msg.Payload())
|
||||
}
|
||||
|
||||
func main() {
|
||||
tlsconfig := NewTLSConfig()
|
||||
|
||||
opts := MQTT.NewClientOptions()
|
||||
opts.AddBroker("ssl://iot.eclipse.org:8883")
|
||||
opts.SetClientID("ssl-sample").SetTLSConfig(tlsconfig)
|
||||
opts.SetDefaultPublishHandler(f)
|
||||
|
||||
// Start the connection
|
||||
c := MQTT.NewClient(opts)
|
||||
if token := c.Connect(); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
}
|
||||
|
||||
c.Subscribe("/go-mqtt/sample", 0, nil)
|
||||
|
||||
i := 0
|
||||
for _ = range time.Tick(time.Duration(1) * time.Second) {
|
||||
if i == 5 {
|
||||
break
|
||||
}
|
||||
text := fmt.Sprintf("this is msg #%d!", i)
|
||||
c.Publish("/go-mqtt/sample", 0, false, text)
|
||||
i++
|
||||
}
|
||||
|
||||
c.Disconnect(250)
|
||||
}
|
|
@ -1,70 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
//"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
import MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
|
||||
func main() {
|
||||
//MQTT.DEBUG = log.New(os.Stdout, "", 0)
|
||||
//MQTT.ERROR = log.New(os.Stdout, "", 0)
|
||||
stdin := bufio.NewReader(os.Stdin)
|
||||
hostname, _ := os.Hostname()
|
||||
|
||||
server := flag.String("server", "tcp://127.0.0.1:1883", "The full URL of the MQTT server to connect to")
|
||||
topic := flag.String("topic", hostname, "Topic to publish the messages on")
|
||||
qos := flag.Int("qos", 0, "The QoS to send the messages at")
|
||||
retained := flag.Bool("retained", false, "Are the messages sent with the retained flag")
|
||||
clientid := flag.String("clientid", hostname+strconv.Itoa(time.Now().Second()), "A clientid for the connection")
|
||||
username := flag.String("username", "", "A username to authenticate to the MQTT server")
|
||||
password := flag.String("password", "", "Password to match username")
|
||||
flag.Parse()
|
||||
|
||||
connOpts := MQTT.NewClientOptions().AddBroker(*server).SetClientID(*clientid).SetCleanSession(true)
|
||||
if *username != "" {
|
||||
connOpts.SetUsername(*username)
|
||||
if *password != "" {
|
||||
connOpts.SetPassword(*password)
|
||||
}
|
||||
}
|
||||
tlsConfig := &tls.Config{InsecureSkipVerify: true, ClientAuth: tls.NoClientCert}
|
||||
connOpts.SetTLSConfig(tlsConfig)
|
||||
|
||||
client := MQTT.NewClient(connOpts)
|
||||
if token := client.Connect(); token.Wait() && token.Error() != nil {
|
||||
fmt.Println(token.Error())
|
||||
return
|
||||
}
|
||||
fmt.Printf("Connected to %s\n", *server)
|
||||
|
||||
for {
|
||||
message, err := stdin.ReadString('\n')
|
||||
if err == io.EOF {
|
||||
os.Exit(0)
|
||||
}
|
||||
client.Publish(*topic, byte(*qos), *retained, message)
|
||||
}
|
||||
}
|
|
@ -1,85 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"fmt"
|
||||
//"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
MQTT "git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git"
|
||||
)
|
||||
|
||||
func onMessageReceived(client *MQTT.Client, message MQTT.Message) {
|
||||
fmt.Printf("Received message on topic: %s\nMessage: %s\n", message.Topic(), message.Payload())
|
||||
}
|
||||
|
||||
var i int64
|
||||
|
||||
func main() {
|
||||
//MQTT.DEBUG = log.New(os.Stdout, "", 0)
|
||||
//MQTT.ERROR = log.New(os.Stdout, "", 0)
|
||||
c := make(chan os.Signal, 1)
|
||||
i = 0
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
|
||||
go func() {
|
||||
<-c
|
||||
fmt.Println("signal received, exiting")
|
||||
os.Exit(0)
|
||||
}()
|
||||
|
||||
hostname, _ := os.Hostname()
|
||||
|
||||
server := flag.String("server", "tcp://127.0.0.1:1883", "The full url of the MQTT server to connect to ex: tcp://127.0.0.1:1883")
|
||||
topic := flag.String("topic", "#", "Topic to subscribe to")
|
||||
qos := flag.Int("qos", 0, "The QoS to subscribe to messages at")
|
||||
clientid := flag.String("clientid", hostname+strconv.Itoa(time.Now().Second()), "A clientid for the connection")
|
||||
username := flag.String("username", "", "A username to authenticate to the MQTT server")
|
||||
password := flag.String("password", "", "Password to match username")
|
||||
flag.Parse()
|
||||
|
||||
connOpts := &MQTT.ClientOptions{
|
||||
ClientID: *clientid,
|
||||
CleanSession: true,
|
||||
Username: *username,
|
||||
Password: *password,
|
||||
MaxReconnectInterval: 1 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
TLSConfig: tls.Config{InsecureSkipVerify: true, ClientAuth: tls.NoClientCert},
|
||||
}
|
||||
connOpts.AddBroker(*server)
|
||||
connOpts.OnConnect = func(c *MQTT.Client) {
|
||||
if token := c.Subscribe(*topic, byte(*qos), onMessageReceived); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
}
|
||||
}
|
||||
|
||||
client := MQTT.NewClient(connOpts)
|
||||
if token := client.Connect(); token.Wait() && token.Error() != nil {
|
||||
panic(token.Error())
|
||||
} else {
|
||||
fmt.Printf("Connected to %s\n", *server)
|
||||
}
|
||||
|
||||
for {
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
125
Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/store.go
generated
vendored
|
@@ -1,125 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const (
|
||||
inboundPrefix = "i."
|
||||
outboundPrefix = "o."
|
||||
)
|
||||
|
||||
// Store is an interface which can be used to provide implementations
|
||||
// for message persistence.
|
||||
// Because we may have to store distinct messages with the same
|
||||
// message ID, we need a unique key for each message. This is
|
||||
// possible by prepending "i." or "o." to each message id
|
||||
type Store interface {
|
||||
Open()
|
||||
Put(string, packets.ControlPacket)
|
||||
Get(string) packets.ControlPacket
|
||||
All() []string
|
||||
Del(string)
|
||||
Close()
|
||||
Reset()
|
||||
}
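A minimal in-memory implementation is sketched below purely to illustrate the Store interface; it is not part of the vendored source, and the memStore name is made up.

// memStore: hypothetical in-memory Store used only as an illustration.
type memStore struct {
	msgs map[string]packets.ControlPacket
}

func (m *memStore) Open()  { m.msgs = make(map[string]packets.ControlPacket) }
func (m *memStore) Close() { m.msgs = nil }
func (m *memStore) Reset() { m.msgs = make(map[string]packets.ControlPacket) }

func (m *memStore) Put(key string, p packets.ControlPacket) { m.msgs[key] = p }
func (m *memStore) Get(key string) packets.ControlPacket    { return m.msgs[key] }
func (m *memStore) Del(key string)                          { delete(m.msgs, key) }

// All returns every stored key, in no particular order.
func (m *memStore) All() []string {
	keys := make([]string, 0, len(m.msgs))
	for k := range m.msgs {
		keys = append(keys, k)
	}
	return keys
}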
|
||||
|
||||
// A key MUST have the form "X.[messageid]"
|
||||
// where X is 'i' or 'o'
|
||||
func mIDFromKey(key string) uint16 {
|
||||
s := key[2:]
|
||||
i, err := strconv.Atoi(s)
|
||||
chkerr(err)
|
||||
return uint16(i)
|
||||
}
|
||||
|
||||
// Return a string of the form "i.[id]"
|
||||
func inboundKeyFromMID(id uint16) string {
|
||||
return fmt.Sprintf("%s%d", inboundPrefix, id)
|
||||
}
|
||||
|
||||
// Return a string of the form "o.[id]"
|
||||
func outboundKeyFromMID(id uint16) string {
|
||||
return fmt.Sprintf("%s%d", outboundPrefix, id)
|
||||
}
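A short sketch (not from the vendored file) showing how the helpers above round-trip a message ID through the "i."/"o." key scheme:

// Hypothetical usage of the key helpers.
id := uint16(123)
inKey := inboundKeyFromMID(id)   // "i.123"
outKey := outboundKeyFromMID(id) // "o.123"
if mIDFromKey(inKey) != id || mIDFromKey(outKey) != id {
	panic("key round-trip failed")
}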
|
||||
|
||||
// govern which outgoing messages are persisted
|
||||
func persistOutbound(s Store, m packets.ControlPacket) {
|
||||
switch m.Details().Qos {
|
||||
case 0:
|
||||
switch m.(type) {
|
||||
case *packets.PubackPacket, *packets.PubcompPacket:
|
||||
// Sending puback. delete matching publish
|
||||
// from ibound
|
||||
s.Del(inboundKeyFromMID(m.Details().MessageID))
|
||||
}
|
||||
case 1:
|
||||
switch m.(type) {
|
||||
case *packets.PublishPacket, *packets.PubrelPacket, *packets.SubscribePacket, *packets.UnsubscribePacket:
|
||||
// Sending publish. store in obound
|
||||
// until puback received
|
||||
s.Put(outboundKeyFromMID(m.Details().MessageID), m)
|
||||
default:
|
||||
chkcond(false)
|
||||
}
|
||||
case 2:
|
||||
switch m.(type) {
|
||||
case *packets.PublishPacket:
|
||||
// Sending publish. store in obound
|
||||
// until pubrel received
|
||||
s.Put(outboundKeyFromMID(m.Details().MessageID), m)
|
||||
default:
|
||||
chkcond(false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// govern which incoming messages are persisted
|
||||
func persistInbound(s Store, m packets.ControlPacket) {
|
||||
switch m.Details().Qos {
|
||||
case 0:
|
||||
switch m.(type) {
|
||||
case *packets.PubackPacket, *packets.SubackPacket, *packets.UnsubackPacket, *packets.PubcompPacket:
|
||||
// Received a puback. delete matching publish
|
||||
// from obound
|
||||
s.Del(outboundKeyFromMID(m.Details().MessageID))
|
||||
case *packets.PublishPacket, *packets.PubrecPacket, *packets.PingrespPacket, *packets.ConnackPacket:
|
||||
default:
|
||||
chkcond(false)
|
||||
}
|
||||
case 1:
|
||||
switch m.(type) {
|
||||
case *packets.PublishPacket, *packets.PubrelPacket:
|
||||
// Received a publish. store it in ibound
|
||||
// until puback sent
|
||||
s.Put(inboundKeyFromMID(m.Details().MessageID), m)
|
||||
default:
|
||||
chkcond(false)
|
||||
}
|
||||
case 2:
|
||||
switch m.(type) {
|
||||
case *packets.PublishPacket:
|
||||
// Received a publish. store it in ibound
|
||||
// until pubrel received
|
||||
s.Put(inboundKeyFromMID(m.Details().MessageID), m)
|
||||
default:
|
||||
chkcond(false)
|
||||
}
|
||||
}
|
||||
}
|
156
Godeps/_workspace/src/git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/token.go
generated
vendored
|
@@ -1,156 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2014 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Allan Stockdill-Mander
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
//PacketAndToken is a struct that contains both a ControlPacket and a
|
||||
//Token. This struct is passed via channels between the client interface
|
||||
//code and the underlying code responsible for sending and receiving
|
||||
//MQTT messages.
|
||||
type PacketAndToken struct {
|
||||
p packets.ControlPacket
|
||||
t Token
|
||||
}
|
||||
|
||||
//Token defines the interface for the tokens used to indicate when
|
||||
//actions have completed.
|
||||
type Token interface {
|
||||
Wait() bool
|
||||
WaitTimeout(time.Duration) bool
|
||||
flowComplete()
|
||||
Error() error
|
||||
}
|
||||
|
||||
type baseToken struct {
|
||||
m sync.RWMutex
|
||||
complete chan struct{}
|
||||
ready bool
|
||||
err error
|
||||
}
|
||||
|
||||
// Wait will wait indefinitely for the Token to complete, i.e. for the Publish
|
||||
// to be sent and its receipt confirmed by the broker
|
||||
func (b *baseToken) Wait() bool {
|
||||
b.m.Lock()
|
||||
defer b.m.Unlock()
|
||||
if !b.ready {
|
||||
<-b.complete
|
||||
b.ready = true
|
||||
}
|
||||
return b.ready
|
||||
}
|
||||
|
||||
// WaitTimeout takes a maximum duration to wait for the flow associated with the
|
||||
// Token to complete, returns true if it returned before the timeout or
|
||||
// returns false if the timeout occurred. In the case of a timeout the Token
|
||||
// does not have an error set in case the caller wishes to wait again
|
||||
func (b *baseToken) WaitTimeout(d time.Duration) bool {
|
||||
b.m.Lock()
|
||||
defer b.m.Unlock()
|
||||
if !b.ready {
|
||||
select {
|
||||
case <-b.complete:
|
||||
b.ready = true
|
||||
case <-time.After(d):
|
||||
}
|
||||
}
|
||||
return b.ready
|
||||
}
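A caller-side usage sketch (assumption; the topic, QoS and payload are made up) combining WaitTimeout and Error as described above:

// Hypothetical use of a Token returned by Publish.
token := client.Publish("sensors/temp", 1, false, "21.5")
if !token.WaitTimeout(5 * time.Second) {
	// Timed out: no error is set on the token, so the caller may wait again or retry.
} else if token.Error() != nil {
	// The flow completed but failed; handle the error.
}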
|
||||
|
||||
func (b *baseToken) flowComplete() {
|
||||
close(b.complete)
|
||||
}
|
||||
|
||||
func (b *baseToken) Error() error {
|
||||
b.m.RLock()
|
||||
defer b.m.RUnlock()
|
||||
return b.err
|
||||
}
|
||||
|
||||
func newToken(tType byte) Token {
|
||||
switch tType {
|
||||
case packets.Connect:
|
||||
return &ConnectToken{baseToken: baseToken{complete: make(chan struct{})}}
|
||||
case packets.Subscribe:
|
||||
return &SubscribeToken{baseToken: baseToken{complete: make(chan struct{})}, subResult: make(map[string]byte)}
|
||||
case packets.Publish:
|
||||
return &PublishToken{baseToken: baseToken{complete: make(chan struct{})}}
|
||||
case packets.Unsubscribe:
|
||||
return &UnsubscribeToken{baseToken: baseToken{complete: make(chan struct{})}}
|
||||
case packets.Disconnect:
|
||||
return &DisconnectToken{baseToken: baseToken{complete: make(chan struct{})}}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//ConnectToken is an extension of Token containing the extra fields
|
||||
//required to provide information about calls to Connect()
|
||||
type ConnectToken struct {
|
||||
baseToken
|
||||
returnCode byte
|
||||
}
|
||||
|
||||
//ReturnCode returns the acknowledgement code in the connack sent
|
||||
//in response to a Connect()
|
||||
func (c *ConnectToken) ReturnCode() byte {
|
||||
c.m.RLock()
|
||||
defer c.m.RUnlock()
|
||||
return c.returnCode
|
||||
}
|
||||
|
||||
//PublishToken is an extension of Token containing the extra fields
|
||||
//required to provide information about calls to Publish()
|
||||
type PublishToken struct {
|
||||
baseToken
|
||||
messageID uint16
|
||||
}
|
||||
|
||||
//MessageID returns the MQTT message ID that was assigned to the
|
||||
//Publish packet when it was sent to the broker
|
||||
func (p *PublishToken) MessageID() uint16 {
|
||||
return p.messageID
|
||||
}
|
||||
|
||||
//SubscribeToken is an extension of Token containing the extra fields
|
||||
//required to provide information about calls to Subscribe()
|
||||
type SubscribeToken struct {
|
||||
baseToken
|
||||
subs []string
|
||||
subResult map[string]byte
|
||||
}
|
||||
|
||||
//Result returns a map of topics that were subscribed to along with
|
||||
//the matching return code from the broker. This is either the Qos
|
||||
//value of the subscription or an error code.
|
||||
func (s *SubscribeToken) Result() map[string]byte {
|
||||
s.m.RLock()
|
||||
defer s.m.RUnlock()
|
||||
return s.subResult
|
||||
}
|
||||
|
||||
//UnsubscribeToken is an extension of Token containing the extra fields
|
||||
//required to provide information about calls to Unsubscribe()
|
||||
type UnsubscribeToken struct {
|
||||
baseToken
|
||||
}
|
||||
|
||||
//DisconnectToken is an extension of Token containing the extra fields
|
||||
//required to provide information about calls to Disconnect()
|
||||
type DisconnectToken struct {
|
||||
baseToken
|
||||
}
|
|
@@ -1,82 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2014 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
)
|
||||
|
||||
//ErrInvalidQos is the error returned when a packet is to be sent
|
||||
//with an invalid Qos value
|
||||
var ErrInvalidQos = errors.New("Invalid QoS")
|
||||
|
||||
//ErrInvalidTopicEmptyString is the error returned when a topic string
|
||||
//is passed in that is 0 length
|
||||
var ErrInvalidTopicEmptyString = errors.New("Invalid Topic; empty string")
|
||||
|
||||
//ErrInvalidTopicMultilevel is the error returned when a topic string
|
||||
//is passed in that has the multi level wildcard in any position but
|
||||
//the last
|
||||
var ErrInvalidTopicMultilevel = errors.New("Invalid Topic; multi-level wildcard must be last level")
|
||||
|
||||
// Topic Names and Topic Filters
|
||||
// The MQTT v3.1.1 spec clarifies a number of ambiguities with regard
|
||||
// to the validity of Topic strings.
|
||||
// - A Topic must be between 1 and 65535 bytes.
|
||||
// - A Topic is case sensitive.
|
||||
// - A Topic may contain whitespace.
|
||||
// - A Topic containing a leading forward slash is different from a Topic without one.
|
||||
// - A Topic may be "/" (two levels, both empty string).
|
||||
// - A Topic must be UTF-8 encoded.
|
||||
// - A Topic may contain any number of levels.
|
||||
// - A Topic may contain an empty level (two forward slashes in a row).
|
||||
// - A TopicName may not contain a wildcard.
|
||||
// - A TopicFilter may only have a # (multi-level) wildcard as the last level.
|
||||
// - A TopicFilter may contain any number of + (single-level) wildcards.
|
||||
// - A TopicFilter with a # will match the absence of a level
|
||||
// Example: a subscription to "foo/#" will match messages published to "foo".
|
||||
|
||||
func validateSubscribeMap(subs map[string]byte) ([]string, []byte, error) {
|
||||
var topics []string
|
||||
var qoss []byte
|
||||
for topic, qos := range subs {
|
||||
if err := validateTopicAndQos(topic, qos); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
topics = append(topics, topic)
|
||||
qoss = append(qoss, qos)
|
||||
}
|
||||
|
||||
return topics, qoss, nil
|
||||
}
|
||||
|
||||
func validateTopicAndQos(topic string, qos byte) error {
|
||||
if len(topic) == 0 {
|
||||
return ErrInvalidTopicEmptyString
|
||||
}
|
||||
|
||||
levels := strings.Split(topic, "/")
|
||||
for i, level := range levels {
|
||||
if level == "#" && i != len(levels)-1 {
|
||||
return ErrInvalidTopicMultilevel
|
||||
}
|
||||
}
|
||||
|
||||
if qos < 0 || qos > 2 {
|
||||
return ErrInvalidQos
|
||||
}
|
||||
return nil
|
||||
}
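Illustrative expectations for validateTopicAndQos, consistent with the rules listed above (sketch only):

// validateTopicAndQos("a/b/#", 1) -> nil                        ("#" only in the last level)
// validateTopicAndQos("a/#/c", 0) -> ErrInvalidTopicMultilevel
// validateTopicAndQos("", 0)      -> ErrInvalidTopicEmptyString
// validateTopicAndQos("a", 3)     -> ErrInvalidQos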
|
|
@@ -1,36 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
)
|
||||
|
||||
// Internal levels of library output that are initialised to not print
|
||||
// anything but can be overridden by the programmer
|
||||
var (
|
||||
ERROR *log.Logger
|
||||
CRITICAL *log.Logger
|
||||
WARN *log.Logger
|
||||
DEBUG *log.Logger
|
||||
)
|
||||
|
||||
func init() {
|
||||
ERROR = log.New(ioutil.Discard, "", 0)
|
||||
CRITICAL = log.New(ioutil.Discard, "", 0)
|
||||
WARN = log.New(ioutil.Discard, "", 0)
|
||||
DEBUG = log.New(ioutil.Discard, "", 0)
|
||||
}
|
|
@@ -1,56 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
_ "net/http/pprof"
|
||||
)
|
||||
|
||||
func init() {
|
||||
DEBUG = log.New(os.Stderr, "DEBUG ", log.Ltime)
|
||||
WARN = log.New(os.Stderr, "WARNING ", log.Ltime)
|
||||
CRITICAL = log.New(os.Stderr, "CRITICAL ", log.Ltime)
|
||||
ERROR = log.New(os.Stderr, "ERROR ", log.Ltime)
|
||||
|
||||
go func() {
|
||||
log.Println(http.ListenAndServe("localhost:6060", nil))
|
||||
}()
|
||||
}
|
||||
|
||||
func Test_NewClient_simple(t *testing.T) {
|
||||
ops := NewClientOptions().SetClientID("foo").AddBroker("tcp://10.10.0.1:1883")
|
||||
c := NewClient(ops)
|
||||
|
||||
if c == nil {
|
||||
t.Fatalf("ops is nil")
|
||||
}
|
||||
|
||||
if c.options.ClientID != "foo" {
|
||||
t.Fatalf("bad client id")
|
||||
}
|
||||
|
||||
if c.options.Servers[0].Scheme != "tcp" {
|
||||
t.Fatalf("bad server scheme")
|
||||
}
|
||||
|
||||
if c.options.Servers[0].Host != "10.10.0.1:1883" {
|
||||
t.Fatalf("bad server host")
|
||||
}
|
||||
}
|
|
@@ -1,111 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type DummyToken struct{}
|
||||
|
||||
func (d *DummyToken) Wait() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *DummyToken) WaitTimeout(t time.Duration) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *DummyToken) flowComplete() {}
|
||||
|
||||
func (d *DummyToken) Error() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func Test_getID(t *testing.T) {
|
||||
mids := &messageIds{index: make(map[uint16]Token)}
|
||||
|
||||
i1 := mids.getID(&DummyToken{})
|
||||
|
||||
if i1 != 1 {
|
||||
t.Fatalf("i1 was wrong: %v", i1)
|
||||
}
|
||||
|
||||
i2 := mids.getID(&DummyToken{})
|
||||
|
||||
if i2 != 2 {
|
||||
t.Fatalf("i2 was wrong: %v", i2)
|
||||
}
|
||||
|
||||
for i := uint16(3); i < 100; i++ {
|
||||
id := mids.getID(&DummyToken{})
|
||||
if id != i {
|
||||
t.Fatalf("id was wrong expected %v got %v", i, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Test_freeID(t *testing.T) {
|
||||
mids := &messageIds{index: make(map[uint16]Token)}
|
||||
|
||||
i1 := mids.getID(&DummyToken{})
|
||||
mids.freeID(i1)
|
||||
|
||||
if i1 != 1 {
|
||||
t.Fatalf("i1 was wrong: %v", i1)
|
||||
}
|
||||
|
||||
i2 := mids.getID(&DummyToken{})
|
||||
fmt.Printf("i2: %v\n", i2)
|
||||
}
|
||||
|
||||
func Test_messageids_mix(t *testing.T) {
|
||||
mids := &messageIds{index: make(map[uint16]Token)}
|
||||
|
||||
done := make(chan bool)
|
||||
a := make(chan uint16, 3)
|
||||
b := make(chan uint16, 20)
|
||||
c := make(chan uint16, 100)
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 10000; i++ {
|
||||
a <- mids.getID(&DummyToken{})
|
||||
mids.freeID(<-b)
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 10000; i++ {
|
||||
b <- mids.getID(&DummyToken{})
|
||||
mids.freeID(<-c)
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 10000; i++ {
|
||||
c <- mids.getID(&DummyToken{})
|
||||
mids.freeID(<-a)
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
|
||||
<-done
|
||||
<-done
|
||||
<-done
|
||||
}
|
|
@@ -1,126 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func Test_NewClientOptions_default(t *testing.T) {
|
||||
o := NewClientOptions()
|
||||
|
||||
if o.ClientID != "" {
|
||||
t.Fatalf("bad default client id")
|
||||
}
|
||||
|
||||
if o.Username != "" {
|
||||
t.Fatalf("bad default username")
|
||||
}
|
||||
|
||||
if o.Password != "" {
|
||||
t.Fatalf("bad default password")
|
||||
}
|
||||
|
||||
if o.KeepAlive != 30*time.Second {
|
||||
t.Fatalf("bad default timeout")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_NewClientOptions_mix(t *testing.T) {
|
||||
o := NewClientOptions()
|
||||
o.AddBroker("tcp://192.168.1.2:9999")
|
||||
o.SetClientID("myclientid")
|
||||
o.SetUsername("myuser")
|
||||
o.SetPassword("mypassword")
|
||||
o.SetKeepAlive(88)
|
||||
|
||||
if o.Servers[0].Scheme != "tcp" {
|
||||
t.Fatalf("bad scheme")
|
||||
}
|
||||
|
||||
if o.Servers[0].Host != "192.168.1.2:9999" {
|
||||
t.Fatalf("bad host")
|
||||
}
|
||||
|
||||
if o.ClientID != "myclientid" {
|
||||
t.Fatalf("bad set clientid")
|
||||
}
|
||||
|
||||
if o.Username != "myuser" {
|
||||
t.Fatalf("bad set username")
|
||||
}
|
||||
|
||||
if o.Password != "mypassword" {
|
||||
t.Fatalf("bad set password")
|
||||
}
|
||||
|
||||
if o.KeepAlive != 88 {
|
||||
t.Fatalf("bad set timeout")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ModifyOptions(t *testing.T) {
|
||||
o := NewClientOptions()
|
||||
o.AddBroker("tcp://3.3.3.3:12345")
|
||||
c := NewClient(o)
|
||||
o.AddBroker("ws://2.2.2.2:9999")
|
||||
o.SetOrderMatters(false)
|
||||
|
||||
if c.options.Servers[0].Scheme != "tcp" {
|
||||
t.Fatalf("client options.server.Scheme was modified")
|
||||
}
|
||||
|
||||
// if c.options.server.Host != "2.2.2.2:9999" {
|
||||
// t.Fatalf("client options.server.Host was modified")
|
||||
// }
|
||||
|
||||
if o.Order != false {
|
||||
t.Fatalf("options.order was not modified")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_TLSConfig(t *testing.T) {
|
||||
o := NewClientOptions().SetTLSConfig(&tls.Config{
|
||||
RootCAs: x509.NewCertPool(),
|
||||
ClientAuth: tls.NoClientCert,
|
||||
ClientCAs: x509.NewCertPool(),
|
||||
InsecureSkipVerify: true})
|
||||
|
||||
c := NewClient(o)
|
||||
|
||||
if c.options.TLSConfig.ClientAuth != tls.NoClientCert {
|
||||
t.Fatalf("client options.tlsConfig ClientAuth incorrect")
|
||||
}
|
||||
|
||||
if c.options.TLSConfig.InsecureSkipVerify != true {
|
||||
t.Fatalf("client options.tlsConfig InsecureSkipVerify incorrect")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_OnConnectionLost(t *testing.T) {
|
||||
onconnlost := func(client *Client, err error) {
|
||||
panic(err)
|
||||
}
|
||||
o := NewClientOptions().SetConnectionLostHandler(onconnlost)
|
||||
|
||||
c := NewClient(o)
|
||||
|
||||
if c.options.OnConnectionLost == nil {
|
||||
t.Fatalf("client options.onconnlost was nil")
|
||||
}
|
||||
}
|
|
@@ -1,62 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_NewPingReqMessage(t *testing.T) {
|
||||
pr := packets.NewControlPacket(packets.Pingreq).(*packets.PingreqPacket)
|
||||
if pr.MessageType != packets.Pingreq {
|
||||
t.Errorf("NewPingReqMessage bad msg type: %v", pr.MessageType)
|
||||
}
|
||||
if pr.RemainingLength != 0 {
|
||||
t.Errorf("NewPingReqMessage bad remlen, expected 0, got %d", pr.RemainingLength)
|
||||
}
|
||||
|
||||
exp := []byte{
|
||||
0xC0,
|
||||
0x00,
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
pr.Write(&buf)
|
||||
bs := buf.Bytes()
|
||||
|
||||
if len(bs) != 2 {
|
||||
t.Errorf("NewPingReqMessage.Bytes() wrong length: %d", len(bs))
|
||||
}
|
||||
|
||||
if exp[0] != bs[0] || exp[1] != bs[1] {
|
||||
t.Errorf("NewPingMessage.Bytes() wrong")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_DecodeMessage_pingresp(t *testing.T) {
|
||||
bs := bytes.NewBuffer([]byte{
|
||||
0xD0,
|
||||
0x00,
|
||||
})
|
||||
presp, _ := packets.ReadPacket(bs)
|
||||
if presp.(*packets.PingrespPacket).MessageType != packets.Pingresp {
|
||||
t.Errorf("DecodeMessage ping response wrong msg type: %v", presp.(*packets.PingrespPacket).MessageType)
|
||||
}
|
||||
if presp.(*packets.PingrespPacket).RemainingLength != 0 {
|
||||
t.Errorf("DecodeMessage ping response wrong rem len: %d", presp.(*packets.PingrespPacket).RemainingLength)
|
||||
}
|
||||
}
|
|
@@ -1,287 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_newRouter(t *testing.T) {
|
||||
router, stop := newRouter()
|
||||
if router == nil {
|
||||
t.Fatalf("router is nil")
|
||||
}
|
||||
if stop == nil {
|
||||
t.Fatalf("stop is nil")
|
||||
}
|
||||
if router.routes.Len() != 0 {
|
||||
t.Fatalf("router.routes was not empty")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_AddRoute(t *testing.T) {
|
||||
router, _ := newRouter()
|
||||
calledback := false
|
||||
cb := func(client *Client, msg Message) {
|
||||
calledback = true
|
||||
}
|
||||
router.addRoute("/alpha", cb)
|
||||
|
||||
if router.routes.Len() != 1 {
|
||||
t.Fatalf("router.routes was wrong")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_Match(t *testing.T) {
|
||||
router, _ := newRouter()
|
||||
router.addRoute("/alpha", nil)
|
||||
|
||||
if !router.routes.Front().Value.(*route).match("/alpha") {
|
||||
t.Fatalf("match function is bad")
|
||||
}
|
||||
|
||||
if router.routes.Front().Value.(*route).match("alpha") {
|
||||
t.Fatalf("match function is bad")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_match(t *testing.T) {
|
||||
|
||||
check := func(route, topic string, exp bool) {
|
||||
result := routeIncludesTopic(route, topic)
|
||||
if exp != result {
|
||||
t.Errorf("match was bad R: %v, T: %v, EXP: %v", route, topic, exp)
|
||||
}
|
||||
}
|
||||
|
||||
// ** Basic **
|
||||
R := ""
|
||||
T := ""
|
||||
check(R, T, true)
|
||||
|
||||
R = "x"
|
||||
T = ""
|
||||
check(R, T, false)
|
||||
|
||||
R = ""
|
||||
T = "x"
|
||||
check(R, T, false)
|
||||
|
||||
R = "x"
|
||||
T = "x"
|
||||
check(R, T, true)
|
||||
|
||||
R = "x"
|
||||
T = "X"
|
||||
check(R, T, false)
|
||||
|
||||
R = "alpha"
|
||||
T = "alpha"
|
||||
check(R, T, true)
|
||||
|
||||
R = "alpha"
|
||||
T = "beta"
|
||||
check(R, T, false)
|
||||
|
||||
// ** / **
|
||||
R = "/"
|
||||
T = "/"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/one"
|
||||
T = "/one"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/"
|
||||
T = "/two"
|
||||
check(R, T, false)
|
||||
|
||||
R = "/two"
|
||||
T = "/"
|
||||
check(R, T, false)
|
||||
|
||||
R = "/two"
|
||||
T = "two"
|
||||
check(R, T, false) // a leading "/" creates a different topic
|
||||
|
||||
R = "/a/"
|
||||
T = "/a"
|
||||
check(R, T, false)
|
||||
|
||||
R = "/a/"
|
||||
T = "/a/b"
|
||||
check(R, T, false)
|
||||
|
||||
R = "/a/b"
|
||||
T = "/a/b"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/a/b/"
|
||||
T = "/a/b"
|
||||
check(R, T, false)
|
||||
|
||||
R = "/a/b"
|
||||
T = "/R/b"
|
||||
check(R, T, false)
|
||||
|
||||
// ** + **
|
||||
R = "/a/+/c"
|
||||
T = "/a/b/c"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/+/b/c"
|
||||
T = "/a/b/c"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/a/b/+"
|
||||
T = "/a/b/c"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/a/+/+"
|
||||
T = "/a/b/c"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/+/+/+"
|
||||
T = "/a/b/c"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/+/+/c"
|
||||
T = "/a/b/c"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/a/b/c/+" // different number of levels
|
||||
T = "/a/b/c"
|
||||
check(R, T, false)
|
||||
|
||||
R = "+"
|
||||
T = "a"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/+"
|
||||
T = "a"
|
||||
check(R, T, false)
|
||||
|
||||
R = "+/+"
|
||||
T = "/a"
|
||||
check(R, T, true)
|
||||
|
||||
R = "+/+"
|
||||
T = "a"
|
||||
check(R, T, false)
|
||||
|
||||
// ** # **
|
||||
R = "#"
|
||||
T = "/a/b/c"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/#"
|
||||
T = "/a/b/c"
|
||||
check(R, T, true)
|
||||
|
||||
// R = "/#/" // not valid
|
||||
// T = "/a/b/c"
|
||||
// check(R, T, true)
|
||||
|
||||
R = "/#"
|
||||
T = "/a/b/c"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/a/#"
|
||||
T = "/a/b/c"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/a/#"
|
||||
T = "/a/b/c"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/a/b/#"
|
||||
T = "/a/b/c"
|
||||
check(R, T, true)
|
||||
|
||||
// ** unicode **
|
||||
R = "☃"
|
||||
T = "☃"
|
||||
check(R, T, true)
|
||||
|
||||
R = "✈"
|
||||
T = "☃"
|
||||
check(R, T, false)
|
||||
|
||||
R = "/☃/✈"
|
||||
T = "/☃/ッ"
|
||||
check(R, T, false)
|
||||
|
||||
R = "#"
|
||||
T = "/☃/ッ"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/☃/+"
|
||||
T = "/☃/ッ/♫/ø/☹☹☹"
|
||||
check(R, T, false)
|
||||
|
||||
R = "/☃/#"
|
||||
T = "/☃/ッ/♫/ø/☹☹☹"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/☃/ッ/♫/ø/+"
|
||||
T = "/☃/ッ/♫/ø/☹☹☹"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/☃/ッ/+/ø/☹☹☹"
|
||||
T = "/☃/ッ/♫/ø/☹☹☹"
|
||||
check(R, T, true)
|
||||
|
||||
R = "/+/a/ッ/+/ø/☹☹☹"
|
||||
T = "/b/♫/ッ/♫/ø/☹☹☹"
|
||||
check(R, T, false)
|
||||
|
||||
R = "/+/♫/ッ/+/ø/☹☹☹"
|
||||
T = "/b/♫/ッ/♫/ø/☹☹☹"
|
||||
check(R, T, true)
|
||||
}
|
||||
|
||||
func Test_MatchAndDispatch(t *testing.T) {
|
||||
calledback := make(chan bool)
|
||||
|
||||
cb := func(c *Client, m Message) {
|
||||
calledback <- true
|
||||
}
|
||||
|
||||
pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pub.Qos = 2
|
||||
pub.TopicName = "a"
|
||||
pub.Payload = []byte("foo")
|
||||
|
||||
msgs := make(chan *packets.PublishPacket)
|
||||
|
||||
router, stopper := newRouter()
|
||||
router.addRoute("a", cb)
|
||||
|
||||
router.matchAndDispatch(msgs, true, nil)
|
||||
|
||||
msgs <- pub
|
||||
|
||||
<-calledback
|
||||
|
||||
stopper <- true
|
||||
|
||||
select {
|
||||
case msgs <- pub:
|
||||
t.Errorf("msgs should not have a listener")
|
||||
default:
|
||||
}
|
||||
|
||||
}
|
|
@@ -1,668 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"git.eclipse.org/gitroot/paho/org.eclipse.paho.mqtt.golang.git/packets"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_fullpath(t *testing.T) {
|
||||
p := fullpath("/tmp/store", "o.44324")
|
||||
e := "/tmp/store/o.44324.msg"
|
||||
if p != e {
|
||||
t.Fatalf("full path expected %s, got %s", e, p)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_exists(t *testing.T) {
|
||||
b := exists("/")
|
||||
if !b {
|
||||
t.Errorf("/proc/cpuinfo was not found")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_exists_no(t *testing.T) {
|
||||
b := exists("/this/path/is/not/real/i/hope")
|
||||
if b {
|
||||
t.Errorf("you have some strange files")
|
||||
}
|
||||
}
|
||||
|
||||
func isemptydir(dir string) bool {
|
||||
chkcond(exists(dir))
|
||||
files, err := ioutil.ReadDir(dir)
|
||||
chkerr(err)
|
||||
return len(files) == 0
|
||||
}
|
||||
|
||||
func Test_mIDFromKey(t *testing.T) {
|
||||
key := "i.123"
|
||||
exp := uint16(123)
|
||||
res := mIDFromKey(key)
|
||||
if exp != res {
|
||||
t.Fatalf("mIDFromKey failed")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_inboundKeyFromMID(t *testing.T) {
|
||||
id := uint16(9876)
|
||||
exp := "i.9876"
|
||||
res := inboundKeyFromMID(id)
|
||||
if exp != res {
|
||||
t.Fatalf("inboundKeyFromMID failed")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_outboundKeyFromMID(t *testing.T) {
|
||||
id := uint16(7654)
|
||||
exp := "o.7654"
|
||||
res := outboundKeyFromMID(id)
|
||||
if exp != res {
|
||||
t.Fatalf("outboundKeyFromMID failed")
|
||||
}
|
||||
}
|
||||
|
||||
/************************
|
||||
**** persistOutbound ****
|
||||
************************/
|
||||
|
||||
func Test_persistOutbound_connect(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Connect).(*packets.ConnectPacket)
|
||||
m.Qos = 0
|
||||
m.Username = "user"
|
||||
m.Password = []byte("pass")
|
||||
m.ClientIdentifier = "cid"
|
||||
//m := newConnectMsg(false, false, QOS_ZERO, false, "", nil, "cid", "user", "pass", 10)
|
||||
persistOutbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 0 {
|
||||
t.Fatalf("persistOutbound put message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistOutbound get message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistOutbound del message it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistOutbound_publish_0(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
m.Qos = 0
|
||||
m.TopicName = "/popub0"
|
||||
m.Payload = []byte{0xBB, 0x00}
|
||||
m.MessageID = 40
|
||||
persistOutbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 0 {
|
||||
t.Fatalf("persistOutbound put message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistOutbound get message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistOutbound del message it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistOutbound_publish_1(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
m.Qos = 1
|
||||
m.TopicName = "/popub1"
|
||||
m.Payload = []byte{0xBB, 0x00}
|
||||
m.MessageID = 41
|
||||
persistOutbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 1 || ts.mput[0] != 41 {
|
||||
t.Fatalf("persistOutbound put message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistOutbound get message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistOutbound del message it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistOutbound_publish_2(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
m.Qos = 2
|
||||
m.TopicName = "/popub2"
|
||||
m.Payload = []byte{0xBB, 0x00}
|
||||
m.MessageID = 42
|
||||
persistOutbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 1 || ts.mput[0] != 42 {
|
||||
t.Fatalf("persistOutbound put message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistOutbound get message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistOutbound del message it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistOutbound_puback(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
|
||||
persistOutbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 0 {
|
||||
t.Fatalf("persistOutbound put message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistOutbound get message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 1 {
|
||||
t.Fatalf("persistOutbound del message it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistOutbound_pubrec(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
|
||||
persistOutbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 0 {
|
||||
t.Fatalf("persistOutbound put message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistOutbound get message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistOutbound del message it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistOutbound_pubrel(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
|
||||
m.MessageID = 43
|
||||
|
||||
persistOutbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 1 || ts.mput[0] != 43 {
|
||||
t.Fatalf("persistOutbound put message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistOutbound get message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistOutbound del message it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistOutbound_pubcomp(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket)
|
||||
persistOutbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 0 {
|
||||
t.Fatalf("persistOutbound put message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistOutbound get message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 1 {
|
||||
t.Fatalf("persistOutbound del message it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistOutbound_subscribe(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Subscribe).(*packets.SubscribePacket)
|
||||
m.Topics = []string{"/posub"}
|
||||
m.Qoss = []byte{1}
|
||||
m.MessageID = 44
|
||||
persistOutbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 1 || ts.mput[0] != 44 {
|
||||
t.Fatalf("persistOutbound put message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistOutbound get message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistOutbound del message it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistOutbound_unsubscribe(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Unsubscribe).(*packets.UnsubscribePacket)
|
||||
m.Topics = []string{"/posub"}
|
||||
m.MessageID = 45
|
||||
persistOutbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 1 || ts.mput[0] != 45 {
|
||||
t.Fatalf("persistOutbound put message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistOutbound get message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistOutbound del message it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistOutbound_pingreq(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Pingreq)
|
||||
persistOutbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 0 {
|
||||
t.Fatalf("persistOutbound put message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistOutbound get message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistOutbound del message it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistOutbound_disconnect(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Disconnect)
|
||||
persistOutbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 0 {
|
||||
t.Fatalf("persistOutbound put message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistOutbound get message it should not have")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistOutbound del message it should not have")
|
||||
}
|
||||
}
|
||||
|
||||
/************************
|
||||
**** persistInbound ****
|
||||
************************/
|
||||
|
||||
func Test_persistInbound_connack(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Connack)
|
||||
persistInbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistInbound_publish_0(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
m.Qos = 0
|
||||
m.TopicName = "/pipub0"
|
||||
m.Payload = []byte{0xCC, 0x01}
|
||||
m.MessageID = 50
|
||||
persistInbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistInbound_publish_1(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
m.Qos = 1
|
||||
m.TopicName = "/pipub1"
|
||||
m.Payload = []byte{0xCC, 0x02}
|
||||
m.MessageID = 51
|
||||
persistInbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 1 || ts.mput[0] != 51 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistInbound_publish_2(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
m.Qos = 2
|
||||
m.TopicName = "/pipub2"
|
||||
m.Payload = []byte{0xCC, 0x03}
|
||||
m.MessageID = 52
|
||||
persistInbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 1 || ts.mput[0] != 52 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistInbound_puback(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pub.Qos = 1
|
||||
pub.TopicName = "/pub1"
|
||||
pub.Payload = []byte{0xCC, 0x04}
|
||||
pub.MessageID = 53
|
||||
publishKey := inboundKeyFromMID(pub.MessageID)
|
||||
ts.Put(publishKey, pub)
|
||||
|
||||
m := packets.NewControlPacket(packets.Puback).(*packets.PubackPacket)
|
||||
m.MessageID = 53
|
||||
|
||||
persistInbound(ts, m) // "deletes" packets.Publish from store
|
||||
|
||||
if len(ts.mput) != 1 { // not actually deleted in TestStore
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 1 || ts.mdel[0] != 53 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistInbound_pubrec(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pub.Qos = 2
|
||||
pub.TopicName = "/pub2"
|
||||
pub.Payload = []byte{0xCC, 0x05}
|
||||
pub.MessageID = 54
|
||||
publishKey := inboundKeyFromMID(pub.MessageID)
|
||||
ts.Put(publishKey, pub)
|
||||
|
||||
m := packets.NewControlPacket(packets.Pubrec).(*packets.PubrecPacket)
|
||||
m.MessageID = 54
|
||||
|
||||
persistInbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 1 || ts.mput[0] != 54 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistInbound_pubrel(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
pub := packets.NewControlPacket(packets.Publish).(*packets.PublishPacket)
|
||||
pub.Qos = 2
|
||||
pub.TopicName = "/pub2"
|
||||
pub.Payload = []byte{0xCC, 0x06}
|
||||
pub.MessageID = 55
|
||||
publishKey := inboundKeyFromMID(pub.MessageID)
|
||||
ts.Put(publishKey, pub)
|
||||
|
||||
m := packets.NewControlPacket(packets.Pubrel).(*packets.PubrelPacket)
|
||||
m.MessageID = 55
|
||||
|
||||
persistInbound(ts, m) // will overwrite publish
|
||||
|
||||
if len(ts.mput) != 2 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistInbound_pubcomp(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
|
||||
m := packets.NewControlPacket(packets.Pubcomp).(*packets.PubcompPacket)
|
||||
m.MessageID = 56
|
||||
|
||||
persistInbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 1 || ts.mdel[0] != 56 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistInbound_suback(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
|
||||
m := packets.NewControlPacket(packets.Suback).(*packets.SubackPacket)
|
||||
m.MessageID = 57
|
||||
|
||||
persistInbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 1 || ts.mdel[0] != 57 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistInbound_unsuback(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
|
||||
m := packets.NewControlPacket(packets.Unsuback).(*packets.UnsubackPacket)
|
||||
m.MessageID = 58
|
||||
|
||||
persistInbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 1 || ts.mdel[0] != 58 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_persistInbound_pingresp(t *testing.T) {
|
||||
ts := &TestStore{}
|
||||
m := packets.NewControlPacket(packets.Pingresp)
|
||||
|
||||
persistInbound(ts, m)
|
||||
|
||||
if len(ts.mput) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mget) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
|
||||
if len(ts.mdel) != 0 {
|
||||
t.Fatalf("persistInbound in bad state")
|
||||
}
|
||||
}
|
||||
|
||||
/***********
|
||||
* restore *
|
||||
***********/
|
||||
|
||||
func ensureRestoreDir() {
|
||||
if exists("/tmp/restore") {
|
||||
rerr := os.RemoveAll("/tmp/restore")
|
||||
chkerr(rerr)
|
||||
}
|
||||
os.Mkdir("/tmp/restore", 0766)
|
||||
}
|
||||
|
||||
func writeToRestore(fname, content string) {
|
||||
f, cerr := os.Create("/tmp/restore/" + fname)
|
||||
chkerr(cerr)
|
||||
chkcond(f != nil)
|
||||
w := bufio.NewWriter(f)
|
||||
w.Write([]byte(content))
|
||||
w.Flush()
|
||||
f.Close()
|
||||
}
|
||||
|
||||
func verifyFromRestore(fname, content string, t *testing.T) {
|
||||
msg, oerr := os.Open("/tmp/restore/" + fname)
|
||||
chkerr(oerr)
|
||||
all, rerr := ioutil.ReadAll(msg)
|
||||
chkerr(rerr)
|
||||
msg.Close()
|
||||
s := string(all)
|
||||
if s != content {
|
||||
t.Fatalf("verify content expected `%s` but got `%s`", content, s)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_restore_1(t *testing.T) {
|
||||
ensureRestoreDir()
|
||||
|
||||
writeToRestore("i.1.bkp", "this is critical 1")
|
||||
|
||||
restore("/tmp/restore")
|
||||
|
||||
chkcond(!exists("/tmp/restore/i.1.bkp"))
|
||||
chkcond(exists("/tmp/restore/i.1.msg"))
|
||||
|
||||
verifyFromRestore("i.1.msg", "this is critical 1", t)
|
||||
}
|
||||
|
||||
func Test_restore_2(t *testing.T) {
|
||||
ensureRestoreDir()
|
||||
|
||||
writeToRestore("o.2.msg", "this is critical 2")
|
||||
|
||||
restore("/tmp/restore")
|
||||
|
||||
chkcond(!exists("/tmp/restore/o.2.bkp"))
|
||||
chkcond(exists("/tmp/restore/o.2.msg"))
|
||||
|
||||
verifyFromRestore("o.2.msg", "this is critical 2", t)
|
||||
}
|
||||
|
||||
func Test_restore_3(t *testing.T) {
|
||||
ensureRestoreDir()
|
||||
|
||||
N := 20
|
||||
// evens are .msg
|
||||
// odds are .bkp
|
||||
for i := 0; i < N; i++ {
|
||||
content := fmt.Sprintf("foo %d bar", i)
|
||||
if i%2 == 0 {
|
||||
mname := fmt.Sprintf("i.%d.msg", i)
|
||||
writeToRestore(mname, content)
|
||||
} else {
|
||||
mname := fmt.Sprintf("i.%d.bkp", i)
|
||||
writeToRestore(mname, content)
|
||||
}
|
||||
}
|
||||
|
||||
restore("/tmp/restore")
|
||||
|
||||
for i := 0; i < N; i++ {
|
||||
mname := fmt.Sprintf("i.%d.msg", i)
|
||||
bname := fmt.Sprintf("i.%d.bkp", i)
|
||||
content := fmt.Sprintf("foo %d bar", i)
|
||||
chkcond(!exists("/tmp/restore/" + bname))
|
||||
chkcond(exists("/tmp/restore/" + mname))
|
||||
|
||||
verifyFromRestore(mname, content, t)
|
||||
}
|
||||
}
|
|
@@ -1,47 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 IBM Corp.
|
||||
*
|
||||
* All rights reserved. This program and the accompanying materials
|
||||
* are made available under the terms of the Eclipse Public License v1.0
|
||||
* which accompanies this distribution, and is available at
|
||||
* http://www.eclipse.org/legal/epl-v10.html
|
||||
*
|
||||
* Contributors:
|
||||
* Seth Hoenig
|
||||
* Allan Stockdill-Mander
|
||||
* Mike Robertson
|
||||
*/
|
||||
|
||||
package mqtt
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_ValidateTopicAndQos_qos3(t *testing.T) {
|
||||
e := validateTopicAndQos("a", 3)
|
||||
if e != ErrInvalidQos {
|
||||
t.Fatalf("invalid error for invalid qos")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ValidateTopicAndQos_ES(t *testing.T) {
|
||||
e := validateTopicAndQos("", 0)
|
||||
if e != ErrInvalidTopicEmptyString {
|
||||
t.Fatalf("invalid error for empty topic name")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ValidateTopicAndQos_a_0(t *testing.T) {
|
||||
e := validateTopicAndQos("a", 0)
|
||||
if e != nil {
|
||||
t.Fatalf("error from valid NewTopicFilter")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ValidateTopicAndQos_H(t *testing.T) {
|
||||
e := validateTopicAndQos("a/#/c", 0)
|
||||
if e != ErrInvalidTopicMultilevel {
|
||||
t.Fatalf("invalid error for bad multilevel topic filter")
|
||||
}
|
||||
}
|
|
@@ -1,24 +0,0 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
*.test
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
.vagrant
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
|
@@ -1,41 +0,0 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.3.3
|
||||
- 1.4.2
|
||||
|
||||
env:
|
||||
global:
|
||||
- KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
|
||||
- TOXIPROXY_ADDR=http://localhost:8474
|
||||
- KAFKA_INSTALL_ROOT=/home/travis/kafka
|
||||
- KAFKA_HOSTNAME=localhost
|
||||
- DEBUG=true
|
||||
matrix:
|
||||
- KAFKA_VERSION=0.8.1.1
|
||||
- KAFKA_VERSION=0.8.2.1
|
||||
|
||||
before_install:
|
||||
- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
|
||||
- vagrant/install_cluster.sh
|
||||
- vagrant/boot_cluster.sh
|
||||
- vagrant/create_topics.sh
|
||||
|
||||
|
||||
install:
|
||||
- make install_dependencies
|
||||
|
||||
script:
|
||||
- make test
|
||||
- make vet
|
||||
- make errcheck
|
||||
- make fmt
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- go: tip
|
||||
env: KAFKA_VERSION=0.8.2.1
|
||||
allow_failures:
|
||||
- go: tip
|
||||
fast_finish: true
|
||||
|
||||
sudo: false
|
|
@@ -1,157 +0,0 @@
|
|||
# Changelog
|
||||
|
||||
#### Version 1.5.0 (unreleased)
|
||||
|
||||
New Features:
|
||||
- TLS-encrypted network connections are now supported. This feature is subject
|
||||
to change when Kafka releases built-in TLS support, but for now this is
|
||||
enough to work with TLS-terminating proxies
|
||||
([#154](https://github.com/Shopify/sarama/pull/154)).
|
||||
|
||||
Improvements:
|
||||
- The consumer will not block if a single partition is not drained by the user;
|
||||
all other partitions will continue to consume normally
|
||||
([#485](https://github.com/Shopify/sarama/pull/485)).
|
||||
- Formatting of error strings has been much improved
|
||||
([#495](https://github.com/Shopify/sarama/pull/495)).
|
||||
- Internal refactoring of the producer for code cleanliness and to enable
|
||||
future work ([#300](https://github.com/Shopify/sarama/pull/300)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix a potential deadlock in the consumer on shutdown
|
||||
([#475](https://github.com/Shopify/sarama/pull/475)).
|
||||
|
||||
#### Version 1.4.3 (2015-07-21)
|
||||
|
||||
Bug Fixes:
|
||||
- Don't include the partitioner in the producer's "fetch partitions"
|
||||
circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
|
||||
- Don't retry messages until the broker is closed when abandoning a broker in
|
||||
the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
|
||||
- Update the import path for snappy-go, it has moved again and the API has
|
||||
changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
|
||||
|
||||
#### Version 1.4.2 (2015-05-27)
|
||||
|
||||
Bug Fixes:
|
||||
- Update the import path for snappy-go, it has moved from google code to github
|
||||
([#456](https://github.com/Shopify/sarama/pull/456)).
|
||||
|
||||
#### Version 1.4.1 (2015-05-25)
|
||||
|
||||
Improvements:
|
||||
- Optimizations when decoding snappy messages, thanks to John Potocny
|
||||
([#446](https://github.com/Shopify/sarama/pull/446)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix hypothetical race conditions on producer shutdown
|
||||
([#450](https://github.com/Shopify/sarama/pull/450),
|
||||
[#451](https://github.com/Shopify/sarama/pull/451)).
|
||||
|
||||
#### Version 1.4.0 (2015-05-01)
|
||||
|
||||
New Features:
|
||||
- The consumer now implements `Topics()` and `Partitions()` methods to enable
|
||||
users to dynamically choose what topics/partitions to consume without
|
||||
instantiating a full client
|
||||
([#431](https://github.com/Shopify/sarama/pull/431)).
|
||||
- The partition-consumer now exposes the high water mark offset value returned
|
||||
by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
|
||||
- Added a `kafka-console-consumer` tool capable of handling multiple
|
||||
partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
|
||||
([#439](https://github.com/Shopify/sarama/pull/439),
|
||||
[#442](https://github.com/Shopify/sarama/pull/442)).
|
||||
|
||||
Improvements:
|
||||
- The producer's logging during retry scenarios is more consistent, more
|
||||
useful, and slightly less verbose
|
||||
([#429](https://github.com/Shopify/sarama/pull/429)).
|
||||
- The client now shuffles its initial list of seed brokers in order to prevent
|
||||
thundering herd on the first broker in the list
|
||||
([#441](https://github.com/Shopify/sarama/pull/441)).
|
||||
|
||||
Bug Fixes:
|
||||
- The producer now correctly manages its state if retries occur when it is
|
||||
shutting down, fixing several instances of confusing behaviour and at least
|
||||
one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
|
||||
- The consumer now handles messages for different partitions asynchronously,
|
||||
making it much more resilient to specific user code ordering
|
||||
([#325](https://github.com/Shopify/sarama/pull/325)).
|
||||
|
||||
#### Version 1.3.0 (2015-04-16)
|
||||
|
||||
New Features:
|
||||
- The client now tracks consumer group coordinators using
|
||||
ConsumerMetadataRequests similar to how it tracks partition leadership using
|
||||
regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
|
||||
This adds two methods to the client API:
|
||||
- `Coordinator(consumerGroup string) (*Broker, error)`
|
||||
- `RefreshCoordinator(consumerGroup string) error`
|
||||
|
||||
Improvements:
|
||||
- ConsumerMetadataResponses now automatically create a Broker object out of the
|
||||
ID/address/port combination for the Coordinator; accessing the fields
|
||||
individually has been deprecated
|
||||
([#413](https://github.com/Shopify/sarama/pull/413)).
|
||||
- Much improved handling of `OffsetOutOfRange` errors in the consumer.
|
||||
Consumers will fail to start if the provided offset is out of range
|
||||
([#418](https://github.com/Shopify/sarama/pull/418))
|
||||
and they will automatically shut down if the offset falls out of range
|
||||
([#424](https://github.com/Shopify/sarama/pull/424)).
|
||||
- Small performance improvement in encoding and decoding protocol messages
|
||||
([#427](https://github.com/Shopify/sarama/pull/427)).
|
||||
|
||||
Bug Fixes:
|
||||
- Fix a rare race condition in the client's background metadata refresher if
|
||||
it happens to be activated while the client is being closed
|
||||
([#422](https://github.com/Shopify/sarama/pull/422)).
|
||||
|
||||
#### Version 1.2.0 (2015-04-07)

Improvements:

- The producer's behaviour when `Flush.Frequency` is set is now more intuitive ([#389](https://github.com/Shopify/sarama/pull/389)).
- The producer is now somewhat more memory-efficient during and after retrying messages due to an improved queue implementation ([#396](https://github.com/Shopify/sarama/pull/396)).
- The consumer produces much more useful logging output when leadership changes ([#385](https://github.com/Shopify/sarama/pull/385)).
- The client's `GetOffset` method will now automatically refresh metadata and retry once in the event of stale information or similar ([#394](https://github.com/Shopify/sarama/pull/394)).
- Broker connections now have support for using TCP keepalives ([#407](https://github.com/Shopify/sarama/issues/407)).

Bug Fixes:

- The OffsetCommitRequest message now correctly implements all three possible API versions ([#390](https://github.com/Shopify/sarama/pull/390), [#400](https://github.com/Shopify/sarama/pull/400)).
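
For example, a `GetOffset` call like the following now refreshes metadata and retries once before giving up (topic, partition, and broker address are placeholders):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatalln(err)
	}
	defer client.Close()

	// Ask the partition's leader for the next offset that will be produced.
	// If the cached metadata is stale, the client refreshes it and retries once.
	offset, err := client.GetOffset("my_topic", 0, sarama.OffsetNewest)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("next offset on my_topic/0:", offset)
}
```
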
#### Version 1.1.0 (2015-03-20)

Improvements:

- Wrap the producer's partitioner call in a circuit-breaker so that repeatedly broken topics don't choke throughput ([#373](https://github.com/Shopify/sarama/pull/373)).

Bug Fixes:

- Fix the producer's internal reference counting in certain unusual scenarios ([#367](https://github.com/Shopify/sarama/pull/367)).
- Fix the consumer's internal reference counting in certain unusual scenarios ([#369](https://github.com/Shopify/sarama/pull/369)).
- Fix a condition where the producer's internal control messages could have gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
- Fix an issue where invalid partition lists would be cached when asking for metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
#### Version 1.0.0 (2015-03-17)

Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:

- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
- All the configuration values have been unified in the `Config` struct.
- Much improved test suite.
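
A minimal sketch of the two producer APIs described above (broker address, topic, and payloads are placeholders; real code should also drain `Successes()` whenever it is enabled):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	brokers := []string{"localhost:9092"}

	// SyncProducer: SendMessage blocks until the broker has acknowledged the message.
	syncConfig := sarama.NewConfig()
	syncConfig.Producer.Return.Successes = true // required so SendMessage can report the offset
	syncProducer, err := sarama.NewSyncProducer(brokers, syncConfig)
	if err != nil {
		log.Fatalln(err)
	}
	defer syncProducer.Close()

	partition, offset, err := syncProducer.SendMessage(&sarama.ProducerMessage{
		Topic: "my_topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("delivered to partition %d at offset %d", partition, offset)

	// AsyncProducer: writes go to Input(); Errors() must be drained or the producer deadlocks.
	asyncProducer, err := sarama.NewAsyncProducer(brokers, sarama.NewConfig())
	if err != nil {
		log.Fatalln(err)
	}
	go func() {
		for perr := range asyncProducer.Errors() {
			log.Println("failed to deliver:", perr)
		}
	}()
	asyncProducer.Input() <- &sarama.ProducerMessage{
		Topic: "my_topic",
		Value: sarama.StringEncoder("world"),
	}
	if err := asyncProducer.Close(); err != nil {
		log.Println("async close:", err)
	}
}
```
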
@ -1,31 +0,0 @@

# Contributing

Contributions are always welcome, both reporting issues and submitting pull requests!

### Reporting issues

Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth.

- What SHA of Sarama are you running? If this is not the latest SHA on the master branch, please check whether the problem persists with the latest version.
- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description.
- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it.

Also, please include the following information about your environment, so we can help you faster:

- What version of Kafka are you using?
- What version of Go are you using?
- What are the values of your Producer/Consumer/Client configuration?

### Submitting pull requests

We will gladly accept bug fixes, or additions to this library. Please fork this library, commit & push your changes, and open a pull request. Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smoothly as possible, please consider the following.

- If you plan to work on something major, please open an issue to discuss the design first.
- Don't break backwards compatibility. If you really have to, open an issue to discuss this first.
- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving.
- Run [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) to detect any suspicious constructs in your code that could be bugs.
- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors.
- You may also want to run [golint](https://github.com/golang/lint) to detect style problems.
- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions.
- Make sure your code is supported by all the Go versions we support. You can rely on [Travis CI](https://travis-ci.org/Shopify/sarama) for testing older Go versions.
@ -1,20 +0,0 @@

Copyright (c) 2013 Evan Huus

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@ -1,24 +0,0 @@

default: fmt vet errcheck test

test:
	go test -v -timeout 60s -race ./...

vet:
	go vet ./...

errcheck:
	errcheck github.com/Shopify/sarama/...

fmt:
	@if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi

install_dependencies: install_errcheck install_go_vet get

install_errcheck:
	go get github.com/kisielk/errcheck

install_go_vet:
	go get golang.org/x/tools/cmd/vet

get:
	go get -t
@ -1,31 +0,0 @@

sarama
======

[![GoDoc](https://godoc.org/github.com/Shopify/sarama?status.png)](https://godoc.org/github.com/Shopify/sarama)
[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)

Sarama is an MIT-licensed Go client library for Apache Kafka 0.8 (and later).

### Getting started

- API documentation and examples are available via godoc at https://godoc.org/github.com/Shopify/sarama.
- Mocks for testing are available in the [mocks](./mocks) subpackage.
- The [examples](./examples) directory contains more elaborate example applications.
- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
- There is a Google group for Kafka client users and authors at https://groups.google.com/forum/#!forum/kafka-clients

### Compatibility and API stability

Sarama provides a "2 releases + 2 months" compatibility guarantee: we support the two latest releases of Kafka and Go, and we provide a two-month grace period for older releases. This means we currently officially support Go 1.3 and 1.4, and Kafka 0.8.1 and 0.8.2.

Sarama follows semantic versioning and provides API stability via the gopkg.in service. You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1. A changelog is available [here](CHANGELOG.md).

### Other

* [Sarama wiki](https://github.com/Shopify/sarama/wiki) to get started hacking on sarama itself.
* [Kafka Project Home](https://kafka.apache.org/)
* [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
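
To make the gopkg.in import path mentioned above concrete, here is a minimal, hedged getting-started sketch (the broker address is a placeholder; the import is aliased explicitly for clarity):

```go
package main

import (
	"log"

	sarama "gopkg.in/Shopify/sarama.v1" // stable v1 API via gopkg.in
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatalln(err)
	}
	defer client.Close()

	// List the topics this client can see, as a quick connectivity check.
	topics, err := client.Topics()
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("topics visible to this client:", topics)
}
```
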
@ -1,22 +0,0 @@

# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

MEMORY = 3072

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "hashicorp/precise64"

  config.vm.provision :shell, path: "vagrant/provision.sh"

  config.vm.network "private_network", ip: "192.168.100.67"

  config.vm.provider "vmware_fusion" do |v|
    v.vmx["memsize"] = MEMORY.to_s
  end
  config.vm.provider "virtualbox" do |v|
    v.memory = MEMORY
  end
end
@ -1,924 +0,0 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/eapache/go-resiliency/breaker"
|
||||
"github.com/eapache/queue"
|
||||
)
|
||||
|
||||
// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
|
||||
// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
|
||||
// and parses responses for errors. You must read from the Errors() channel or the
|
||||
// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
|
||||
// leaks: it will not be garbage-collected automatically when it passes out of
|
||||
// scope.
|
||||
type AsyncProducer interface {
|
||||
|
||||
// AsyncClose triggers a shutdown of the producer, flushing any messages it may have
|
||||
// buffered. The shutdown has completed when both the Errors and Successes channels
|
||||
// have been closed. When calling AsyncClose, you *must* continue to read from those
|
||||
// channels in order to drain the results of any messages in flight.
|
||||
AsyncClose()
|
||||
|
||||
// Close shuts down the producer and flushes any messages it may have buffered.
|
||||
// You must call this function before a producer object passes out of scope, as
|
||||
// it may otherwise leak memory. You must call this before calling Close on the
|
||||
// underlying client.
|
||||
Close() error
|
||||
|
||||
// Input is the input channel for the user to write messages to that they wish to send.
|
||||
Input() chan<- *ProducerMessage
|
||||
|
||||
// Successes is the success output channel back to the user when AckSuccesses is enabled.
|
||||
// If Return.Successes is true, you MUST read from this channel or the Producer will deadlock.
|
||||
// It is suggested that you send and read messages together in a single select statement.
|
||||
Successes() <-chan *ProducerMessage
|
||||
|
||||
// Errors is the error output channel back to the user. You MUST read from this channel
|
||||
// or the Producer will deadlock when the channel is full. Alternatively, you can set
|
||||
// Producer.Return.Errors in your config to false, which prevents errors to be returned.
|
||||
Errors() <-chan *ProducerError
|
||||
}
|
||||
|
||||
type asyncProducer struct {
|
||||
client Client
|
||||
conf *Config
|
||||
ownClient bool
|
||||
|
||||
errors chan *ProducerError
|
||||
input, successes, retries chan *ProducerMessage
|
||||
inFlight sync.WaitGroup
|
||||
|
||||
brokers map[*Broker]chan<- *ProducerMessage
|
||||
brokerRefs map[chan<- *ProducerMessage]int
|
||||
brokerLock sync.Mutex
|
||||
}
|
||||
|
||||
// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
|
||||
func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
|
||||
client, err := NewClient(addrs, conf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p, err := NewAsyncProducerFromClient(client)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.(*asyncProducer).ownClient = true
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
|
||||
// necessary to call Close() on the underlying client when shutting down this producer.
|
||||
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
|
||||
// Check that we are not dealing with a closed Client before processing any other arguments
|
||||
if client.Closed() {
|
||||
return nil, ErrClosedClient
|
||||
}
|
||||
|
||||
p := &asyncProducer{
|
||||
client: client,
|
||||
conf: client.Config(),
|
||||
errors: make(chan *ProducerError),
|
||||
input: make(chan *ProducerMessage),
|
||||
successes: make(chan *ProducerMessage),
|
||||
retries: make(chan *ProducerMessage),
|
||||
brokers: make(map[*Broker]chan<- *ProducerMessage),
|
||||
brokerRefs: make(map[chan<- *ProducerMessage]int),
|
||||
}
|
||||
|
||||
// launch our singleton dispatchers
|
||||
go withRecover(p.dispatcher)
|
||||
go withRecover(p.retryHandler)
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
type flagSet int8
|
||||
|
||||
const (
|
||||
chaser flagSet = 1 << iota // message is last in a group that failed
|
||||
shutdown // start the shutdown process
|
||||
)
|
||||
|
||||
// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
|
||||
type ProducerMessage struct {
|
||||
Topic string // The Kafka topic for this message.
|
||||
Key Encoder // The partitioning key for this message. It must implement the Encoder interface. Pre-existing Encoders include StringEncoder and ByteEncoder.
|
||||
Value Encoder // The actual message to store in Kafka. It must implement the Encoder interface. Pre-existing Encoders include StringEncoder and ByteEncoder.
|
||||
|
||||
// These are filled in by the producer as the message is processed
|
||||
Offset int64 // Offset is the offset of the message stored on the broker. This is only guaranteed to be defined if the message was successfully delivered and RequiredAcks is not NoResponse.
|
||||
Partition int32 // Partition is the partition that the message was sent to. This is only guaranteed to be defined if the message was successfully delivered.
|
||||
|
||||
Metadata interface{} // This field is used to hold arbitrary data you wish to include so it will be available when receiving on the Successes and Errors channels. Sarama completely ignores this field and is only to be used for pass-through data.
|
||||
|
||||
retries int
|
||||
flags flagSet
|
||||
}
|
||||
|
||||
func (m *ProducerMessage) byteSize() int {
|
||||
size := 26 // the metadata overhead of CRC, flags, etc.
|
||||
if m.Key != nil {
|
||||
size += m.Key.Length()
|
||||
}
|
||||
if m.Value != nil {
|
||||
size += m.Value.Length()
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
func (m *ProducerMessage) clear() {
|
||||
m.flags = 0
|
||||
m.retries = 0
|
||||
}
|
||||
|
||||
// ProducerError is the type of error generated when the producer fails to deliver a message.
|
||||
// It contains the original ProducerMessage as well as the actual error value.
|
||||
type ProducerError struct {
|
||||
Msg *ProducerMessage
|
||||
Err error
|
||||
}
|
||||
|
||||
func (pe ProducerError) Error() string {
|
||||
return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
|
||||
}
|
||||
|
||||
// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
|
||||
// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
|
||||
// when closing a producer.
|
||||
type ProducerErrors []*ProducerError
|
||||
|
||||
func (pe ProducerErrors) Error() string {
|
||||
return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Errors() <-chan *ProducerError {
|
||||
return p.errors
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Successes() <-chan *ProducerMessage {
|
||||
return p.successes
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Input() chan<- *ProducerMessage {
|
||||
return p.input
|
||||
}
|
||||
|
||||
func (p *asyncProducer) Close() error {
|
||||
p.AsyncClose()
|
||||
|
||||
if p.conf.Producer.Return.Successes {
|
||||
go withRecover(func() {
|
||||
for _ = range p.successes {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var errors ProducerErrors
|
||||
if p.conf.Producer.Return.Errors {
|
||||
for event := range p.errors {
|
||||
errors = append(errors, event)
|
||||
}
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return errors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *asyncProducer) AsyncClose() {
|
||||
go withRecover(p.shutdown)
|
||||
}
|
||||
|
||||
// singleton
|
||||
// dispatches messages by topic
|
||||
func (p *asyncProducer) dispatcher() {
|
||||
handlers := make(map[string]chan<- *ProducerMessage)
|
||||
shuttingDown := false
|
||||
|
||||
for msg := range p.input {
|
||||
if msg == nil {
|
||||
Logger.Println("Something tried to send a nil message, it was ignored.")
|
||||
continue
|
||||
}
|
||||
|
||||
if msg.flags&shutdown != 0 {
|
||||
shuttingDown = true
|
||||
p.inFlight.Done()
|
||||
continue
|
||||
} else if msg.retries == 0 {
|
||||
if shuttingDown {
|
||||
// we can't just call returnError here because that decrements the wait group,
|
||||
// which hasn't been incremented yet for this message, and shouldn't be
|
||||
pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
|
||||
if p.conf.Producer.Return.Errors {
|
||||
p.errors <- pErr
|
||||
} else {
|
||||
Logger.Println(pErr)
|
||||
}
|
||||
continue
|
||||
}
|
||||
p.inFlight.Add(1)
|
||||
}
|
||||
|
||||
if (p.conf.Producer.Compression == CompressionNone && msg.Value != nil && msg.Value.Length() > p.conf.Producer.MaxMessageBytes) ||
|
||||
(msg.byteSize() > p.conf.Producer.MaxMessageBytes) {
|
||||
|
||||
p.returnError(msg, ErrMessageSizeTooLarge)
|
||||
continue
|
||||
}
|
||||
|
||||
handler := handlers[msg.Topic]
|
||||
if handler == nil {
|
||||
handler = p.newTopicProducer(msg.Topic)
|
||||
handlers[msg.Topic] = handler
|
||||
}
|
||||
|
||||
handler <- msg
|
||||
}
|
||||
|
||||
for _, handler := range handlers {
|
||||
close(handler)
|
||||
}
|
||||
}
|
||||
|
||||
// one per topic
|
||||
// partitions messages, then dispatches them by partition
|
||||
type topicProducer struct {
|
||||
parent *asyncProducer
|
||||
topic string
|
||||
input <-chan *ProducerMessage
|
||||
|
||||
breaker *breaker.Breaker
|
||||
handlers map[int32]chan<- *ProducerMessage
|
||||
partitioner Partitioner
|
||||
}
|
||||
|
||||
func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
|
||||
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
|
||||
tp := &topicProducer{
|
||||
parent: p,
|
||||
topic: topic,
|
||||
input: input,
|
||||
breaker: breaker.New(3, 1, 10*time.Second),
|
||||
handlers: make(map[int32]chan<- *ProducerMessage),
|
||||
partitioner: p.conf.Producer.Partitioner(topic),
|
||||
}
|
||||
go withRecover(tp.dispatch)
|
||||
return input
|
||||
}
|
||||
|
||||
func (tp *topicProducer) dispatch() {
|
||||
for msg := range tp.input {
|
||||
if msg.retries == 0 {
|
||||
if err := tp.partitionMessage(msg); err != nil {
|
||||
tp.parent.returnError(msg, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
handler := tp.handlers[msg.Partition]
|
||||
if handler == nil {
|
||||
handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
|
||||
tp.handlers[msg.Partition] = handler
|
||||
}
|
||||
|
||||
handler <- msg
|
||||
}
|
||||
|
||||
for _, handler := range tp.handlers {
|
||||
close(handler)
|
||||
}
|
||||
}
|
||||
|
||||
func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
|
||||
var partitions []int32
|
||||
|
||||
err := tp.breaker.Run(func() (err error) {
|
||||
if tp.partitioner.RequiresConsistency() {
|
||||
partitions, err = tp.parent.client.Partitions(msg.Topic)
|
||||
} else {
|
||||
partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
|
||||
}
|
||||
return
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
numPartitions := int32(len(partitions))
|
||||
|
||||
if numPartitions == 0 {
|
||||
return ErrLeaderNotAvailable
|
||||
}
|
||||
|
||||
choice, err := tp.partitioner.Partition(msg, numPartitions)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
} else if choice < 0 || choice >= numPartitions {
|
||||
return ErrInvalidPartition
|
||||
}
|
||||
|
||||
msg.Partition = partitions[choice]
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// one per partition per topic
|
||||
// dispatches messages to the appropriate broker
|
||||
// also responsible for maintaining message order during retries
|
||||
type partitionProducer struct {
|
||||
parent *asyncProducer
|
||||
topic string
|
||||
partition int32
|
||||
input <-chan *ProducerMessage
|
||||
|
||||
leader *Broker
|
||||
breaker *breaker.Breaker
|
||||
output chan<- *ProducerMessage
|
||||
|
||||
// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
|
||||
// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
|
||||
// retryState[msg.retries].expectChaser simply tracks whether we've seen a chaser message for a given level (and
|
||||
// therefore whether our buffer is complete and safe to flush)
|
||||
highWatermark int
|
||||
retryState []partitionRetryState
|
||||
}
|
||||
|
||||
type partitionRetryState struct {
|
||||
buf []*ProducerMessage
|
||||
expectChaser bool
|
||||
}
|
||||
|
||||
func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
|
||||
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
|
||||
pp := &partitionProducer{
|
||||
parent: p,
|
||||
topic: topic,
|
||||
partition: partition,
|
||||
input: input,
|
||||
|
||||
breaker: breaker.New(3, 1, 10*time.Second),
|
||||
retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
|
||||
}
|
||||
go withRecover(pp.dispatch)
|
||||
return input
|
||||
}
|
||||
|
||||
func (pp *partitionProducer) dispatch() {
|
||||
// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
|
||||
// on the first message
|
||||
pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
|
||||
if pp.leader != nil {
|
||||
pp.output = pp.parent.getBrokerProducer(pp.leader)
|
||||
}
|
||||
|
||||
for msg := range pp.input {
|
||||
if msg.retries > pp.highWatermark {
|
||||
// a new, higher, retry level; handle it and then back off
|
||||
pp.newHighWatermark(msg.retries)
|
||||
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
|
||||
} else if pp.highWatermark > 0 {
|
||||
// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
|
||||
if msg.retries < pp.highWatermark {
|
||||
// in fact this message is not even the current retry level, so buffer it for now (unless it's a just a chaser)
|
||||
if msg.flags&chaser == chaser {
|
||||
pp.retryState[msg.retries].expectChaser = false
|
||||
pp.parent.inFlight.Done() // this chaser is now handled and will be garbage collected
|
||||
} else {
|
||||
pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
|
||||
}
|
||||
continue
|
||||
} else if msg.flags&chaser == chaser {
|
||||
// this message is of the current retry level (msg.retries == highWatermark) and the chaser flag is set,
|
||||
// meaning this retry level is done and we can go down (at least) one level and flush that
|
||||
pp.retryState[pp.highWatermark].expectChaser = false
|
||||
pp.flushRetryBuffers()
|
||||
pp.parent.inFlight.Done() // this chaser is now handled and will be garbage collected
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
|
||||
// without breaking any of our ordering guarantees
|
||||
|
||||
if pp.output == nil {
|
||||
if err := pp.updateLeader(); err != nil {
|
||||
pp.parent.returnError(msg, err)
|
||||
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
|
||||
continue
|
||||
}
|
||||
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
|
||||
}
|
||||
|
||||
pp.output <- msg
|
||||
}
|
||||
|
||||
if pp.output != nil {
|
||||
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
|
||||
}
|
||||
}
|
||||
|
||||
func (pp *partitionProducer) newHighWatermark(hwm int) {
|
||||
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
|
||||
pp.highWatermark = hwm
|
||||
|
||||
// send off a chaser so that we know when everything "in between" has made it
|
||||
// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
|
||||
pp.retryState[pp.highWatermark].expectChaser = true
|
||||
pp.parent.inFlight.Add(1) // we're generating a chaser message; track it so we don't shut down while it's still inflight
|
||||
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: chaser, retries: pp.highWatermark - 1}
|
||||
|
||||
// a new HWM means that our current broker selection is out of date
|
||||
Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
|
||||
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
|
||||
pp.output = nil
|
||||
}
|
||||
|
||||
func (pp *partitionProducer) flushRetryBuffers() {
|
||||
Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
|
||||
for {
|
||||
pp.highWatermark--
|
||||
|
||||
if pp.output == nil {
|
||||
if err := pp.updateLeader(); err != nil {
|
||||
pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
|
||||
goto flushDone
|
||||
}
|
||||
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
|
||||
}
|
||||
|
||||
for _, msg := range pp.retryState[pp.highWatermark].buf {
|
||||
pp.output <- msg
|
||||
}
|
||||
|
||||
flushDone:
|
||||
pp.retryState[pp.highWatermark].buf = nil
|
||||
if pp.retryState[pp.highWatermark].expectChaser {
|
||||
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
|
||||
break
|
||||
} else if pp.highWatermark == 0 {
|
||||
Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pp *partitionProducer) updateLeader() error {
|
||||
return pp.breaker.Run(func() (err error) {
|
||||
if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pp.output = pp.parent.getBrokerProducer(pp.leader)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// one per broker, constructs both an aggregator and a flusher
|
||||
func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
|
||||
input := make(chan *ProducerMessage)
|
||||
bridge := make(chan []*ProducerMessage)
|
||||
|
||||
a := &aggregator{
|
||||
parent: p,
|
||||
broker: broker,
|
||||
input: input,
|
||||
output: bridge,
|
||||
}
|
||||
go withRecover(a.run)
|
||||
|
||||
f := &flusher{
|
||||
parent: p,
|
||||
broker: broker,
|
||||
input: bridge,
|
||||
currentRetries: make(map[string]map[int32]error),
|
||||
}
|
||||
go withRecover(f.run)
|
||||
|
||||
return input
|
||||
}
|
||||
|
||||
// groups messages together into appropriately-sized batches for sending to the broker
|
||||
// based on https://godoc.org/github.com/eapache/channels#BatchingChannel
|
||||
type aggregator struct {
|
||||
parent *asyncProducer
|
||||
broker *Broker
|
||||
input <-chan *ProducerMessage
|
||||
output chan<- []*ProducerMessage
|
||||
|
||||
buffer []*ProducerMessage
|
||||
bufferBytes int
|
||||
timer <-chan time.Time
|
||||
}
|
||||
|
||||
func (a *aggregator) run() {
|
||||
var output chan<- []*ProducerMessage
|
||||
|
||||
for {
|
||||
select {
|
||||
case msg := <-a.input:
|
||||
if msg == nil {
|
||||
goto shutdown
|
||||
}
|
||||
|
||||
if a.wouldOverflow(msg) {
|
||||
Logger.Printf("producer/aggregator/%d maximum request accumulated, forcing blocking flush\n", a.broker.ID())
|
||||
a.output <- a.buffer
|
||||
a.reset()
|
||||
output = nil
|
||||
}
|
||||
|
||||
a.buffer = append(a.buffer, msg)
|
||||
a.bufferBytes += msg.byteSize()
|
||||
|
||||
if a.readyToFlush(msg) {
|
||||
output = a.output
|
||||
} else if a.parent.conf.Producer.Flush.Frequency > 0 && a.timer == nil {
|
||||
a.timer = time.After(a.parent.conf.Producer.Flush.Frequency)
|
||||
}
|
||||
case <-a.timer:
|
||||
output = a.output
|
||||
case output <- a.buffer:
|
||||
a.reset()
|
||||
output = nil
|
||||
}
|
||||
}
|
||||
|
||||
shutdown:
|
||||
if len(a.buffer) > 0 {
|
||||
a.output <- a.buffer
|
||||
}
|
||||
close(a.output)
|
||||
}
|
||||
|
||||
func (a *aggregator) wouldOverflow(msg *ProducerMessage) bool {
|
||||
switch {
|
||||
// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
|
||||
case a.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)):
|
||||
return true
|
||||
// Would we overflow the size-limit of a compressed message-batch?
|
||||
case a.parent.conf.Producer.Compression != CompressionNone && a.bufferBytes+msg.byteSize() >= a.parent.conf.Producer.MaxMessageBytes:
|
||||
return true
|
||||
// Would we overflow simply in number of messages?
|
||||
case a.parent.conf.Producer.Flush.MaxMessages > 0 && len(a.buffer) >= a.parent.conf.Producer.Flush.MaxMessages:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (a *aggregator) readyToFlush(msg *ProducerMessage) bool {
|
||||
switch {
|
||||
// If all three config values are 0, we always flush as-fast-as-possible
|
||||
case a.parent.conf.Producer.Flush.Frequency == 0 && a.parent.conf.Producer.Flush.Bytes == 0 && a.parent.conf.Producer.Flush.Messages == 0:
|
||||
return true
|
||||
// If the messages is a chaser we must flush to maintain the state-machine
|
||||
case msg.flags&chaser == chaser:
|
||||
return true
|
||||
// If we've passed the message trigger-point
|
||||
case a.parent.conf.Producer.Flush.Messages > 0 && len(a.buffer) >= a.parent.conf.Producer.Flush.Messages:
|
||||
return true
|
||||
// If we've passed the byte trigger-point
|
||||
case a.parent.conf.Producer.Flush.Bytes > 0 && a.bufferBytes >= a.parent.conf.Producer.Flush.Bytes:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (a *aggregator) reset() {
|
||||
a.timer = nil
|
||||
a.buffer = nil
|
||||
a.bufferBytes = 0
|
||||
}
|
||||
|
||||
// takes a batch at a time from the aggregator and sends to the broker
|
||||
type flusher struct {
|
||||
parent *asyncProducer
|
||||
broker *Broker
|
||||
input <-chan []*ProducerMessage
|
||||
|
||||
currentRetries map[string]map[int32]error
|
||||
}
|
||||
|
||||
func (f *flusher) run() {
|
||||
var closing error
|
||||
|
||||
Logger.Printf("producer/flusher/%d starting up\n", f.broker.ID())
|
||||
|
||||
for batch := range f.input {
|
||||
if closing != nil {
|
||||
f.parent.retryMessages(batch, closing)
|
||||
continue
|
||||
}
|
||||
|
||||
msgSets := f.groupAndFilter(batch)
|
||||
request := f.parent.buildRequest(msgSets)
|
||||
if request == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
response, err := f.broker.Produce(request)
|
||||
|
||||
switch err.(type) {
|
||||
case nil:
|
||||
break
|
||||
case PacketEncodingError:
|
||||
f.parent.returnErrors(batch, err)
|
||||
continue
|
||||
default:
|
||||
Logger.Printf("producer/flusher/%d state change to [closing] because %s\n", f.broker.ID(), err)
|
||||
f.parent.abandonBrokerConnection(f.broker)
|
||||
_ = f.broker.Close()
|
||||
closing = err
|
||||
f.parent.retryMessages(batch, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if response == nil {
|
||||
// this only happens when RequiredAcks is NoResponse, so we have to assume success
|
||||
f.parent.returnSuccesses(batch)
|
||||
continue
|
||||
}
|
||||
|
||||
f.parseResponse(msgSets, response)
|
||||
}
|
||||
Logger.Printf("producer/flusher/%d shut down\n", f.broker.ID())
|
||||
}
|
||||
|
||||
func (f *flusher) groupAndFilter(batch []*ProducerMessage) map[string]map[int32][]*ProducerMessage {
|
||||
msgSets := make(map[string]map[int32][]*ProducerMessage)
|
||||
|
||||
for i, msg := range batch {
|
||||
|
||||
if f.currentRetries[msg.Topic] != nil && f.currentRetries[msg.Topic][msg.Partition] != nil {
|
||||
// we're currently retrying this partition so we need to filter out this message
|
||||
f.parent.retryMessages([]*ProducerMessage{msg}, f.currentRetries[msg.Topic][msg.Partition])
|
||||
batch[i] = nil
|
||||
|
||||
if msg.flags&chaser == chaser {
|
||||
// ...but now we can start processing future messages again
|
||||
Logger.Printf("producer/flusher/%d state change to [normal] on %s/%d\n",
|
||||
f.broker.ID(), msg.Topic, msg.Partition)
|
||||
delete(f.currentRetries[msg.Topic], msg.Partition)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
partitionSet := msgSets[msg.Topic]
|
||||
if partitionSet == nil {
|
||||
partitionSet = make(map[int32][]*ProducerMessage)
|
||||
msgSets[msg.Topic] = partitionSet
|
||||
}
|
||||
|
||||
partitionSet[msg.Partition] = append(partitionSet[msg.Partition], msg)
|
||||
}
|
||||
|
||||
return msgSets
|
||||
}
|
||||
|
||||
func (f *flusher) parseResponse(msgSets map[string]map[int32][]*ProducerMessage, response *ProduceResponse) {
|
||||
// we iterate through the blocks in the request set, not the response, so that we notice
|
||||
// if the response is missing a block completely
|
||||
for topic, partitionSet := range msgSets {
|
||||
for partition, msgs := range partitionSet {
|
||||
block := response.GetBlock(topic, partition)
|
||||
if block == nil {
|
||||
f.parent.returnErrors(msgs, ErrIncompleteResponse)
|
||||
continue
|
||||
}
|
||||
|
||||
switch block.Err {
|
||||
// Success
|
||||
case ErrNoError:
|
||||
for i := range msgs {
|
||||
msgs[i].Offset = block.Offset + int64(i)
|
||||
}
|
||||
f.parent.returnSuccesses(msgs)
|
||||
// Retriable errors
|
||||
case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable,
|
||||
ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
|
||||
Logger.Printf("producer/flusher/%d state change to [retrying] on %s/%d because %v\n",
|
||||
f.broker.ID(), topic, partition, block.Err)
|
||||
if f.currentRetries[topic] == nil {
|
||||
f.currentRetries[topic] = make(map[int32]error)
|
||||
}
|
||||
f.currentRetries[topic][partition] = block.Err
|
||||
f.parent.retryMessages(msgs, block.Err)
|
||||
// Other non-retriable errors
|
||||
default:
|
||||
f.parent.returnErrors(msgs, block.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// singleton
|
||||
// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
|
||||
// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
|
||||
func (p *asyncProducer) retryHandler() {
|
||||
var msg *ProducerMessage
|
||||
buf := queue.New()
|
||||
|
||||
for {
|
||||
if buf.Length() == 0 {
|
||||
msg = <-p.retries
|
||||
} else {
|
||||
select {
|
||||
case msg = <-p.retries:
|
||||
case p.input <- buf.Peek().(*ProducerMessage):
|
||||
buf.Remove()
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if msg == nil {
|
||||
return
|
||||
}
|
||||
|
||||
buf.Add(msg)
|
||||
}
|
||||
}
|
||||
|
||||
// utility functions
|
||||
|
||||
func (p *asyncProducer) shutdown() {
|
||||
Logger.Println("Producer shutting down.")
|
||||
p.inFlight.Add(1)
|
||||
p.input <- &ProducerMessage{flags: shutdown}
|
||||
|
||||
p.inFlight.Wait()
|
||||
|
||||
if p.ownClient {
|
||||
err := p.client.Close()
|
||||
if err != nil {
|
||||
Logger.Println("producer/shutdown failed to close the embedded client:", err)
|
||||
}
|
||||
}
|
||||
|
||||
close(p.input)
|
||||
close(p.retries)
|
||||
close(p.errors)
|
||||
close(p.successes)
|
||||
}
|
||||
|
||||
func (p *asyncProducer) buildRequest(batch map[string]map[int32][]*ProducerMessage) *ProduceRequest {
|
||||
|
||||
req := &ProduceRequest{RequiredAcks: p.conf.Producer.RequiredAcks, Timeout: int32(p.conf.Producer.Timeout / time.Millisecond)}
|
||||
empty := true
|
||||
|
||||
for topic, partitionSet := range batch {
|
||||
for partition, msgSet := range partitionSet {
|
||||
setToSend := new(MessageSet)
|
||||
setSize := 0
|
||||
for _, msg := range msgSet {
|
||||
var keyBytes, valBytes []byte
|
||||
var err error
|
||||
if msg.Key != nil {
|
||||
if keyBytes, err = msg.Key.Encode(); err != nil {
|
||||
p.returnError(msg, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if msg.Value != nil {
|
||||
if valBytes, err = msg.Value.Encode(); err != nil {
|
||||
p.returnError(msg, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if p.conf.Producer.Compression != CompressionNone && setSize+msg.byteSize() > p.conf.Producer.MaxMessageBytes {
|
||||
// compression causes message-sets to be wrapped as single messages, which have tighter
|
||||
// size requirements, so we have to respect those limits
|
||||
valBytes, err := encode(setToSend)
|
||||
if err != nil {
|
||||
Logger.Println(err) // if this happens, it's basically our fault.
|
||||
panic(err)
|
||||
}
|
||||
req.AddMessage(topic, partition, &Message{Codec: p.conf.Producer.Compression, Key: nil, Value: valBytes})
|
||||
setToSend = new(MessageSet)
|
||||
setSize = 0
|
||||
}
|
||||
setSize += msg.byteSize()
|
||||
|
||||
setToSend.addMessage(&Message{Codec: CompressionNone, Key: keyBytes, Value: valBytes})
|
||||
empty = false
|
||||
}
|
||||
|
||||
if p.conf.Producer.Compression == CompressionNone {
|
||||
req.AddSet(topic, partition, setToSend)
|
||||
} else {
|
||||
valBytes, err := encode(setToSend)
|
||||
if err != nil {
|
||||
Logger.Println(err) // if this happens, it's basically our fault.
|
||||
panic(err)
|
||||
}
|
||||
req.AddMessage(topic, partition, &Message{Codec: p.conf.Producer.Compression, Key: nil, Value: valBytes})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if empty {
|
||||
return nil
|
||||
}
|
||||
return req
|
||||
}
|
||||
|
||||
func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
|
||||
msg.clear()
|
||||
pErr := &ProducerError{Msg: msg, Err: err}
|
||||
if p.conf.Producer.Return.Errors {
|
||||
p.errors <- pErr
|
||||
} else {
|
||||
Logger.Println(pErr)
|
||||
}
|
||||
p.inFlight.Done()
|
||||
}
|
||||
|
||||
func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
|
||||
for _, msg := range batch {
|
||||
if msg != nil {
|
||||
p.returnError(msg, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
|
||||
for _, msg := range batch {
|
||||
if msg == nil {
|
||||
continue
|
||||
}
|
||||
if p.conf.Producer.Return.Successes {
|
||||
msg.clear()
|
||||
p.successes <- msg
|
||||
}
|
||||
p.inFlight.Done()
|
||||
}
|
||||
}
|
||||
|
||||
func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
|
||||
for _, msg := range batch {
|
||||
if msg == nil {
|
||||
continue
|
||||
}
|
||||
if msg.retries >= p.conf.Producer.Retry.Max {
|
||||
p.returnError(msg, err)
|
||||
} else {
|
||||
msg.retries++
|
||||
p.retries <- msg
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
|
||||
p.brokerLock.Lock()
|
||||
defer p.brokerLock.Unlock()
|
||||
|
||||
bp := p.brokers[broker]
|
||||
|
||||
if bp == nil {
|
||||
bp = p.newBrokerProducer(broker)
|
||||
p.brokers[broker] = bp
|
||||
p.brokerRefs[bp] = 0
|
||||
}
|
||||
|
||||
p.brokerRefs[bp]++
|
||||
|
||||
return bp
|
||||
}
|
||||
|
||||
func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
|
||||
p.brokerLock.Lock()
|
||||
defer p.brokerLock.Unlock()
|
||||
|
||||
p.brokerRefs[bp]--
|
||||
if p.brokerRefs[bp] == 0 {
|
||||
close(bp)
|
||||
delete(p.brokerRefs, bp)
|
||||
|
||||
if p.brokers[broker] == bp {
|
||||
delete(p.brokers, broker)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
|
||||
p.brokerLock.Lock()
|
||||
defer p.brokerLock.Unlock()
|
||||
|
||||
delete(p.brokers, broker)
|
||||
}
|
|
@ -1,743 +0,0 @@
|
|||
package sarama
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const TestMessage = "ABC THE MESSAGE"
|
||||
|
||||
func closeProducer(t *testing.T, p AsyncProducer) {
|
||||
var wg sync.WaitGroup
|
||||
p.AsyncClose()
|
||||
|
||||
wg.Add(2)
|
||||
go func() {
|
||||
for _ = range p.Successes() {
|
||||
t.Error("Unexpected message on Successes()")
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
go func() {
|
||||
for msg := range p.Errors() {
|
||||
t.Error(msg.Err)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func expectResults(t *testing.T, p AsyncProducer, successes, errors int) {
|
||||
for successes > 0 || errors > 0 {
|
||||
select {
|
||||
case msg := <-p.Errors():
|
||||
if msg.Msg.flags != 0 {
|
||||
t.Error("Message had flags set")
|
||||
}
|
||||
errors--
|
||||
if errors < 0 {
|
||||
t.Error(msg.Err)
|
||||
}
|
||||
case msg := <-p.Successes():
|
||||
if msg.flags != 0 {
|
||||
t.Error("Message had flags set")
|
||||
}
|
||||
successes--
|
||||
if successes < 0 {
|
||||
t.Error("Too many successes")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type testPartitioner chan *int32
|
||||
|
||||
func (p testPartitioner) Partition(msg *ProducerMessage, numPartitions int32) (int32, error) {
|
||||
part := <-p
|
||||
if part == nil {
|
||||
return 0, errors.New("BOOM")
|
||||
}
|
||||
|
||||
return *part, nil
|
||||
}
|
||||
|
||||
func (p testPartitioner) RequiresConsistency() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (p testPartitioner) feed(partition int32) {
|
||||
p <- &partition
|
||||
}
|
||||
|
||||
func TestAsyncProducer(t *testing.T) {
|
||||
seedBroker := newMockBroker(t, 1)
|
||||
leader := newMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Metadata: i}
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
select {
|
||||
case msg := <-producer.Errors():
|
||||
t.Error(msg.Err)
|
||||
if msg.Msg.flags != 0 {
|
||||
t.Error("Message had flags set")
|
||||
}
|
||||
case msg := <-producer.Successes():
|
||||
if msg.flags != 0 {
|
||||
t.Error("Message had flags set")
|
||||
}
|
||||
if msg.Metadata.(int) != i {
|
||||
t.Error("Message metadata did not match")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
closeProducer(t, producer)
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerMultipleFlushes(t *testing.T) {
|
||||
seedBroker := newMockBroker(t, 1)
|
||||
leader := newMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodSuccess)
|
||||
leader.Returns(prodSuccess)
|
||||
leader.Returns(prodSuccess)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 5
|
||||
config.Producer.Return.Successes = true
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for flush := 0; flush < 3; flush++ {
|
||||
for i := 0; i < 5; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
expectResults(t, producer, 5, 0)
|
||||
}
|
||||
|
||||
closeProducer(t, producer)
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerMultipleBrokers(t *testing.T) {
|
||||
seedBroker := newMockBroker(t, 1)
|
||||
leader0 := newMockBroker(t, 2)
|
||||
leader1 := newMockBroker(t, 3)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID())
|
||||
metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, ErrNoError)
|
||||
metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodResponse0 := new(ProduceResponse)
|
||||
prodResponse0.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader0.Returns(prodResponse0)
|
||||
|
||||
prodResponse1 := new(ProduceResponse)
|
||||
prodResponse1.AddTopicPartition("my_topic", 1, ErrNoError)
|
||||
leader1.Returns(prodResponse1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 5
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Partitioner = NewRoundRobinPartitioner
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
closeProducer(t, producer)
|
||||
leader1.Close()
|
||||
leader0.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerCustomPartitioner(t *testing.T) {
|
||||
seedBroker := newMockBroker(t, 1)
|
||||
leader := newMockBroker(t, 2)
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodResponse := new(ProduceResponse)
|
||||
prodResponse.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader.Returns(prodResponse)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 2
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Partitioner = func(topic string) Partitioner {
|
||||
p := make(testPartitioner)
|
||||
go func() {
|
||||
p.feed(0)
|
||||
p <- nil
|
||||
p <- nil
|
||||
p <- nil
|
||||
p.feed(0)
|
||||
}()
|
||||
return p
|
||||
}
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
expectResults(t, producer, 2, 3)
|
||||
|
||||
closeProducer(t, producer)
|
||||
leader.Close()
|
||||
seedBroker.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerFailureRetry(t *testing.T) {
|
||||
seedBroker := newMockBroker(t, 1)
|
||||
leader1 := newMockBroker(t, 2)
|
||||
leader2 := newMockBroker(t, 3)
|
||||
|
||||
metadataLeader1 := new(MetadataResponse)
|
||||
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
|
||||
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
seedBroker.Close()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
prodNotLeader := new(ProduceResponse)
|
||||
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
|
||||
leader1.Returns(prodNotLeader)
|
||||
|
||||
metadataLeader2 := new(MetadataResponse)
|
||||
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
|
||||
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
|
||||
leader1.Returns(metadataLeader2)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
leader1.Close()
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
leader2.Close()
|
||||
closeProducer(t, producer)
|
||||
}
|
||||
|
||||
// If a Kafka broker becomes unavailable and then returns back in service, then
|
||||
// producer reconnects to it and continues sending messages.
|
||||
func TestAsyncProducerBrokerBounce(t *testing.T) {
|
||||
// Given
|
||||
seedBroker := newMockBroker(t, 1)
|
||||
leader := newMockBroker(t, 2)
|
||||
leaderAddr := leader.Addr()
|
||||
|
||||
metadataResponse := new(MetadataResponse)
|
||||
metadataResponse.AddBroker(leaderAddr, leader.BrokerID())
|
||||
metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataResponse)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 1
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 1, 0)
|
||||
|
||||
// When: a broker connection gets reset by a broker (network glitch, restart, you name it).
|
||||
leader.Close() // producer should get EOF
|
||||
leader = newMockBrokerAddr(t, 2, leaderAddr) // start it up again right away for giggles
|
||||
seedBroker.Returns(metadataResponse) // tell it to go to broker 2 again
|
||||
|
||||
// Then: a produced message goes through the new broker connection.
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
leader.Returns(prodSuccess)
|
||||
expectResults(t, producer, 1, 0)
|
||||
|
||||
closeProducer(t, producer)
|
||||
seedBroker.Close()
|
||||
leader.Close()
|
||||
}
|
||||
|
||||
func TestAsyncProducerBrokerBounceWithStaleMetadata(t *testing.T) {
|
||||
seedBroker := newMockBroker(t, 1)
|
||||
leader1 := newMockBroker(t, 2)
|
||||
leader2 := newMockBroker(t, 3)
|
||||
|
||||
metadataLeader1 := new(MetadataResponse)
|
||||
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
|
||||
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Max = 3
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
leader1.Close() // producer should get EOF
|
||||
seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down
|
||||
seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down
|
||||
|
||||
// ok fine, tell it to go to leader2 finally
|
||||
metadataLeader2 := new(MetadataResponse)
|
||||
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
|
||||
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader2)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
seedBroker.Close()
|
||||
leader2.Close()
|
||||
|
||||
closeProducer(t, producer)
|
||||
}
|
||||
|
||||
func TestAsyncProducerMultipleRetries(t *testing.T) {
|
||||
seedBroker := newMockBroker(t, 1)
|
||||
leader1 := newMockBroker(t, 2)
|
||||
leader2 := newMockBroker(t, 3)
|
||||
|
||||
metadataLeader1 := new(MetadataResponse)
|
||||
metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID())
|
||||
metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
|
||||
config := NewConfig()
|
||||
config.Producer.Flush.Messages = 10
|
||||
config.Producer.Return.Successes = true
|
||||
config.Producer.Retry.Max = 4
|
||||
config.Producer.Retry.Backoff = 0
|
||||
producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
prodNotLeader := new(ProduceResponse)
|
||||
prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
|
||||
leader1.Returns(prodNotLeader)
|
||||
|
||||
metadataLeader2 := new(MetadataResponse)
|
||||
metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID())
|
||||
metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError)
|
||||
seedBroker.Returns(metadataLeader2)
|
||||
leader2.Returns(prodNotLeader)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
leader1.Returns(prodNotLeader)
|
||||
seedBroker.Returns(metadataLeader1)
|
||||
leader1.Returns(prodNotLeader)
|
||||
seedBroker.Returns(metadataLeader2)
|
||||
|
||||
prodSuccess := new(ProduceResponse)
|
||||
prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
|
||||
}
|
||||
leader2.Returns(prodSuccess)
|
||||
expectResults(t, producer, 10, 0)
|
||||
|
||||
seedBroker.Close()
|
||||
leader1.Close()
|
||||
leader2.Close()
|
||||
closeProducer(t, producer)
|
||||
}
|
||||
|
||||
func TestAsyncProducerOutOfRetries(t *testing.T) {
	t.Skip("Enable once bug #294 is fixed.")

	seedBroker := newMockBroker(t, 1)
	leader := newMockBroker(t, 2)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)

	config := NewConfig()
	config.Producer.Flush.Messages = 10
	config.Producer.Return.Successes = true
	config.Producer.Retry.Backoff = 0
	config.Producer.Retry.Max = 0
	producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < 10; i++ {
		producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
	}

	prodNotLeader := new(ProduceResponse)
	prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
	leader.Returns(prodNotLeader)

	for i := 0; i < 10; i++ {
		select {
		case msg := <-producer.Errors():
			if msg.Err != ErrNotLeaderForPartition {
				t.Error(msg.Err)
			}
		case <-producer.Successes():
			t.Error("Unexpected success")
		}
	}

	seedBroker.Returns(metadataResponse)

	for i := 0; i < 10; i++ {
		producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
	}

	prodSuccess := new(ProduceResponse)
	prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
	leader.Returns(prodSuccess)

	expectResults(t, producer, 10, 0)

	leader.Close()
	seedBroker.Close()
	safeClose(t, producer)
}

func TestAsyncProducerRetryWithReferenceOpen(t *testing.T) {
	seedBroker := newMockBroker(t, 1)
	leader := newMockBroker(t, 2)
	leaderAddr := leader.Addr()

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leaderAddr, leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
	metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)

	config := NewConfig()
	config.Producer.Return.Successes = true
	config.Producer.Retry.Backoff = 0
	config.Producer.Retry.Max = 1
	config.Producer.Partitioner = NewRoundRobinPartitioner
	producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	// prime partition 0
	producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
	prodSuccess := new(ProduceResponse)
	prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
	leader.Returns(prodSuccess)
	expectResults(t, producer, 1, 0)

	// prime partition 1
	producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
	prodSuccess = new(ProduceResponse)
	prodSuccess.AddTopicPartition("my_topic", 1, ErrNoError)
	leader.Returns(prodSuccess)
	expectResults(t, producer, 1, 0)

	// reboot the broker (the producer will get EOF on its existing connection)
	leader.Close()
	leader = newMockBrokerAddr(t, 2, leaderAddr)

	// send another message on partition 0 to trigger the EOF and retry
	producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}

	// tell partition 0 to go to that broker again
	seedBroker.Returns(metadataResponse)

	// succeed this time
	prodSuccess = new(ProduceResponse)
	prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
	leader.Returns(prodSuccess)
	expectResults(t, producer, 1, 0)

	// shutdown
	closeProducer(t, producer)
	seedBroker.Close()
	leader.Close()
}

func TestAsyncProducerFlusherRetryCondition(t *testing.T) {
	seedBroker := newMockBroker(t, 1)
	leader := newMockBroker(t, 2)

	metadataResponse := new(MetadataResponse)
	metadataResponse.AddBroker(leader.Addr(), leader.BrokerID())
	metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
	metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataResponse)

	config := NewConfig()
	config.Producer.Flush.Messages = 5
	config.Producer.Return.Successes = true
	config.Producer.Retry.Backoff = 0
	config.Producer.Retry.Max = 1
	config.Producer.Partitioner = NewManualPartitioner
	producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	// prime partitions
	for p := int32(0); p < 2; p++ {
		for i := 0; i < 5; i++ {
			producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: p}
		}
		prodSuccess := new(ProduceResponse)
		prodSuccess.AddTopicPartition("my_topic", p, ErrNoError)
		leader.Returns(prodSuccess)
		expectResults(t, producer, 5, 0)
	}

	// send more messages on partition 0
	for i := 0; i < 5; i++ {
		producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0}
	}
	prodNotLeader := new(ProduceResponse)
	prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
	leader.Returns(prodNotLeader)

	// tell partition 0 to go to that broker again
	seedBroker.Returns(metadataResponse)

	// succeed this time
	prodSuccess := new(ProduceResponse)
	prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
	leader.Returns(prodSuccess)
	expectResults(t, producer, 5, 0)

	// put five more through
	for i := 0; i < 5; i++ {
		producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0}
	}
	leader.Returns(prodSuccess)
	expectResults(t, producer, 5, 0)

	// shutdown
	closeProducer(t, producer)
	seedBroker.Close()
	leader.Close()
}

func TestAsyncProducerRetryShutdown(t *testing.T) {
	seedBroker := newMockBroker(t, 1)
	leader := newMockBroker(t, 2)

	metadataLeader := new(MetadataResponse)
	metadataLeader.AddBroker(leader.Addr(), leader.BrokerID())
	metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError)
	seedBroker.Returns(metadataLeader)

	config := NewConfig()
	config.Producer.Flush.Messages = 10
	config.Producer.Return.Successes = true
	config.Producer.Retry.Backoff = 0
	producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config)
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < 10; i++ {
		producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)}
	}
	producer.AsyncClose()
	time.Sleep(5 * time.Millisecond) // let the shutdown goroutine kick in

	producer.Input() <- &ProducerMessage{Topic: "FOO"}
	if err := <-producer.Errors(); err.Err != ErrShuttingDown {
		t.Error(err)
	}

	prodNotLeader := new(ProduceResponse)
	prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition)
	leader.Returns(prodNotLeader)

	seedBroker.Returns(metadataLeader)

	prodSuccess := new(ProduceResponse)
	prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError)
	leader.Returns(prodSuccess)
	expectResults(t, producer, 10, 0)

	seedBroker.Close()
	leader.Close()

	// wait for the async-closed producer to shut down fully
	for err := range producer.Errors() {
		t.Error(err)
	}
}

// This example shows how to use the producer while simultaneously
// reading the Errors channel to know about any failures.
func ExampleAsyncProducer_select() {
	producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	}

	defer func() {
		if err := producer.Close(); err != nil {
			log.Fatalln(err)
		}
	}()

	// Trap SIGINT to trigger a shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	var enqueued, errors int
ProducerLoop:
	for {
		select {
		case producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder("testing 123")}:
			enqueued++
		case err := <-producer.Errors():
			log.Println("Failed to produce message", err)
			errors++
		case <-signals:
			break ProducerLoop
		}
	}

	log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors)
}

// This example shows how to use the producer with separate goroutines
// reading from the Successes and Errors channels. Note that in order
// for the Successes channel to be populated, you have to set
// config.Producer.Return.Successes to true.
func ExampleAsyncProducer_goroutines() {
	config := NewConfig()
	config.Producer.Return.Successes = true
	producer, err := NewAsyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		panic(err)
	}

	// Trap SIGINT to trigger a graceful shutdown.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt)

	var (
		wg                          sync.WaitGroup
		enqueued, successes, errors int
	)

	wg.Add(1)
	go func() {
		defer wg.Done()
		for _ = range producer.Successes() {
			successes++
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		for err := range producer.Errors() {
			log.Println(err)
			errors++
		}
	}()

ProducerLoop:
	for {
		message := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")}
		select {
		case producer.Input() <- message:
			enqueued++

		case <-signals:
			producer.AsyncClose() // Trigger a shutdown of the producer.
			break ProducerLoop
		}
	}

	wg.Wait()

	log.Printf("Successfully produced: %d; errors: %d\n", successes, errors)
}
@@ -1,385 +0,0 @@
package sarama

import (
	"crypto/tls"
	"fmt"
	"io"
	"net"
	"strconv"
	"sync"
	"sync/atomic"
	"time"
)

// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
type Broker struct {
	id   int32
	addr string

	conf          *Config
	correlationID int32
	conn          net.Conn
	connErr       error
	lock          sync.Mutex
	opened        int32

	responses chan responsePromise
	done      chan bool
}

type responsePromise struct {
	correlationID int32
	packets       chan []byte
	errors        chan error
}

// NewBroker creates and returns a Broker targeting the given host:port address.
// This does not attempt to actually connect, you have to call Open() for that.
func NewBroker(addr string) *Broker {
	return &Broker{id: -1, addr: addr}
}

// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
// waiting for the connection to complete. This means that any subsequent operations on the broker will
// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
func (b *Broker) Open(conf *Config) error {
	if conf == nil {
		conf = NewConfig()
	}

	err := conf.Validate()
	if err != nil {
		return err
	}

	if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
		return ErrAlreadyConnected
	}

	b.lock.Lock()

	if b.conn != nil {
		b.lock.Unlock()
		Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, ErrAlreadyConnected)
		return ErrAlreadyConnected
	}

	go withRecover(func() {
		defer b.lock.Unlock()

		dialer := net.Dialer{
			Timeout:   conf.Net.DialTimeout,
			KeepAlive: conf.Net.KeepAlive,
		}

		if conf.Net.TLS.Enable {
			b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
		} else {
			b.conn, b.connErr = dialer.Dial("tcp", b.addr)
		}
		if b.connErr != nil {
			b.conn = nil
			atomic.StoreInt32(&b.opened, 0)
			Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
			return
		}

		b.conf = conf
		b.done = make(chan bool)
		b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)

		if b.id >= 0 {
			Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
		} else {
			Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
		}
		go withRecover(b.responseReceiver)
	})

	return nil
}

// Connected returns true if the broker is connected and false otherwise. If the broker is not
// connected but it had tried to connect, the error from that connection attempt is also returned.
func (b *Broker) Connected() (bool, error) {
	b.lock.Lock()
	defer b.lock.Unlock()

	return b.conn != nil, b.connErr
}

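The Open/Connected pairing documented above is easy to misread, so here is a minimal sketch of how a caller could get fully synchronous connect semantics; the helper name openBrokerSync is illustrative and not part of this file.

func openBrokerSync(addr string, conf *Config) (*Broker, error) {
	b := NewBroker(addr)
	if err := b.Open(conf); err != nil {
		return nil, err
	}
	// Connected blocks on the broker lock until the dial goroutine started by
	// Open has released it, so by the time it returns the attempt has either
	// succeeded or failed.
	connected, err := b.Connected()
	if err != nil {
		return nil, err
	}
	if !connected {
		return nil, ErrNotConnected
	}
	return b, nil
}
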
func (b *Broker) Close() error {
	b.lock.Lock()
	defer b.lock.Unlock()

	if b.conn == nil {
		return ErrNotConnected
	}

	close(b.responses)
	<-b.done

	err := b.conn.Close()

	b.conn = nil
	b.connErr = nil
	b.done = nil
	b.responses = nil

	atomic.StoreInt32(&b.opened, 0)

	if err == nil {
		Logger.Printf("Closed connection to broker %s\n", b.addr)
	} else {
		Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
	}

	return err
}

// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
func (b *Broker) ID() int32 {
	return b.id
}

// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
func (b *Broker) Addr() string {
	return b.addr
}

func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
	response := new(MetadataResponse)

	err := b.sendAndReceive(request, response)

	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
	response := new(ConsumerMetadataResponse)

	err := b.sendAndReceive(request, response)

	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
	response := new(OffsetResponse)

	err := b.sendAndReceive(request, response)

	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
	var response *ProduceResponse
	var err error

	if request.RequiredAcks == NoResponse {
		err = b.sendAndReceive(request, nil)
	} else {
		response = new(ProduceResponse)
		err = b.sendAndReceive(request, response)
	}

	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
	response := new(FetchResponse)

	err := b.sendAndReceive(request, response)

	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
	response := new(OffsetCommitResponse)

	err := b.sendAndReceive(request, response)

	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
	response := new(OffsetFetchResponse)

	err := b.sendAndReceive(request, response)

	if err != nil {
		return nil, err
	}

	return response, nil
}

func (b *Broker) send(rb requestBody, promiseResponse bool) (*responsePromise, error) {
	b.lock.Lock()
	defer b.lock.Unlock()

	if b.conn == nil {
		if b.connErr != nil {
			return nil, b.connErr
		}
		return nil, ErrNotConnected
	}

	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
	buf, err := encode(req)
	if err != nil {
		return nil, err
	}

	err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
	if err != nil {
		return nil, err
	}

	_, err = b.conn.Write(buf)
	if err != nil {
		return nil, err
	}
	b.correlationID++

	if !promiseResponse {
		return nil, nil
	}

	promise := responsePromise{req.correlationID, make(chan []byte), make(chan error)}
	b.responses <- promise

	return &promise, nil
}

func (b *Broker) sendAndReceive(req requestBody, res decoder) error {
	promise, err := b.send(req, res != nil)

	if err != nil {
		return err
	}

	if promise == nil {
		return nil
	}

	select {
	case buf := <-promise.packets:
		return decode(buf, res)
	case err = <-promise.errors:
		return err
	}
}

func (b *Broker) decode(pd packetDecoder) (err error) {
	b.id, err = pd.getInt32()
	if err != nil {
		return err
	}

	host, err := pd.getString()
	if err != nil {
		return err
	}

	port, err := pd.getInt32()
	if err != nil {
		return err
	}

	b.addr = fmt.Sprint(host, ":", port)

	return nil
}

func (b *Broker) encode(pe packetEncoder) (err error) {

	host, portstr, err := net.SplitHostPort(b.addr)
	if err != nil {
		return err
	}
	port, err := strconv.Atoi(portstr)
	if err != nil {
		return err
	}

	pe.putInt32(b.id)

	err = pe.putString(host)
	if err != nil {
		return err
	}

	pe.putInt32(int32(port))

	return nil
}

func (b *Broker) responseReceiver() {
	header := make([]byte, 8)
	for response := range b.responses {
		err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
		if err != nil {
			response.errors <- err
			continue
		}

		_, err = io.ReadFull(b.conn, header)
		if err != nil {
			response.errors <- err
			continue
		}

		decodedHeader := responseHeader{}
		err = decode(header, &decodedHeader)
		if err != nil {
			response.errors <- err
			continue
		}
		if decodedHeader.correlationID != response.correlationID {
			// TODO if decoded ID < cur ID, discard until we catch up
			// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
			response.errors <- PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
			continue
		}

		buf := make([]byte, decodedHeader.length-4)
		_, err = io.ReadFull(b.conn, buf)
		if err != nil {
			// XXX: the above ReadFull call inherits the same ReadDeadline set at the top of this loop, so it may
			// fail with a timeout error. If this happens, our connection is permanently toast since we will no longer
			// be aligned correctly on the stream (we'll be reading garbage Kafka headers from the middle of data).
			// Can we/should we fail harder in that case?
			response.errors <- err
			continue
		}

		response.packets <- buf
	}
	close(b.done)
}
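
Because the Broker type above is documented as entirely concurrency-safe, several goroutines can share one opened Broker; the sketch below is illustrative only (the helper name and topic are not from this file) and issues two different requests in parallel over a single connection.

func fetchInParallel(b *Broker) {
	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		defer wg.Done()
		// Requests are serialized onto the connection and responses are checked
		// against their correlation IDs by responseReceiver above.
		if _, err := b.GetMetadata(&MetadataRequest{Topics: []string{"myTopic"}}); err != nil {
			Logger.Println(err)
		}
	}()

	wg.Add(1)
	go func() {
		defer wg.Done()
		if _, err := b.GetAvailableOffsets(&OffsetRequest{}); err != nil {
			Logger.Println(err)
		}
	}()

	wg.Wait()
}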
@@ -1,177 +0,0 @@
package sarama

import (
	"fmt"
	"testing"
)

func ExampleBroker() error {
	broker := NewBroker("localhost:9092")
	err := broker.Open(nil)
	if err != nil {
		return err
	}

	request := MetadataRequest{Topics: []string{"myTopic"}}
	response, err := broker.GetMetadata(&request)
	if err != nil {
		_ = broker.Close()
		return err
	}

	fmt.Println("There are", len(response.Topics), "topics active in the cluster.")

	return broker.Close()
}

type mockEncoder struct {
	bytes []byte
}

func (m mockEncoder) encode(pe packetEncoder) error {
	return pe.putRawBytes(m.bytes)
}

func TestBrokerAccessors(t *testing.T) {
	broker := NewBroker("abc:123")

	if broker.ID() != -1 {
		t.Error("New broker didn't have an ID of -1.")
	}

	if broker.Addr() != "abc:123" {
		t.Error("New broker didn't have the correct address")
	}

	broker.id = 34
	if broker.ID() != 34 {
		t.Error("Manually setting broker ID did not take effect.")
	}
}

func TestSimpleBrokerCommunication(t *testing.T) {
	mb := newMockBroker(t, 0)
	defer mb.Close()

	broker := NewBroker(mb.Addr())
	err := broker.Open(nil)
	if err != nil {
		t.Fatal(err)
	}

	for _, tt := range brokerTestTable {
		mb.Returns(&mockEncoder{tt.response})
	}
	for _, tt := range brokerTestTable {
		tt.runner(t, broker)
	}

	err = broker.Close()
	if err != nil {
		t.Error(err)
	}
}

// We're not testing encoding/decoding here, so most of the requests/responses will be empty for simplicity's sake
var brokerTestTable = []struct {
	response []byte
	runner   func(*testing.T, *Broker)
}{
	{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
		func(t *testing.T, broker *Broker) {
			request := MetadataRequest{}
			response, err := broker.GetMetadata(&request)
			if err != nil {
				t.Error(err)
			}
			if response == nil {
				t.Error("Metadata request got no response!")
			}
		}},

	{[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 't', 0x00, 0x00, 0x00, 0x00},
		func(t *testing.T, broker *Broker) {
			request := ConsumerMetadataRequest{}
			response, err := broker.GetConsumerMetadata(&request)
			if err != nil {
				t.Error(err)
			}
			if response == nil {
				t.Error("Consumer Metadata request got no response!")
			}
		}},

	{[]byte{},
		func(t *testing.T, broker *Broker) {
			request := ProduceRequest{}
			request.RequiredAcks = NoResponse
			response, err := broker.Produce(&request)
			if err != nil {
				t.Error(err)
			}
			if response != nil {
				t.Error("Produce request with NoResponse got a response!")
			}
		}},

	{[]byte{0x00, 0x00, 0x00, 0x00},
		func(t *testing.T, broker *Broker) {
			request := ProduceRequest{}
			request.RequiredAcks = WaitForLocal
			response, err := broker.Produce(&request)
			if err != nil {
				t.Error(err)
			}
			if response == nil {
				t.Error("Produce request without NoResponse got no response!")
			}
		}},

	{[]byte{0x00, 0x00, 0x00, 0x00},
		func(t *testing.T, broker *Broker) {
			request := FetchRequest{}
			response, err := broker.Fetch(&request)
			if err != nil {
				t.Error(err)
			}
			if response == nil {
				t.Error("Fetch request got no response!")
			}
		}},

	{[]byte{0x00, 0x00, 0x00, 0x00},
		func(t *testing.T, broker *Broker) {
			request := OffsetFetchRequest{}
			response, err := broker.FetchOffset(&request)
			if err != nil {
				t.Error(err)
			}
			if response == nil {
				t.Error("OffsetFetch request got no response!")
			}
		}},

	{[]byte{0x00, 0x00, 0x00, 0x00},
		func(t *testing.T, broker *Broker) {
			request := OffsetCommitRequest{}
			response, err := broker.CommitOffset(&request)
			if err != nil {
				t.Error(err)
			}
			if response == nil {
				t.Error("OffsetCommit request got no response!")
			}
		}},

	{[]byte{0x00, 0x00, 0x00, 0x00},
		func(t *testing.T, broker *Broker) {
			request := OffsetRequest{}
			response, err := broker.GetAvailableOffsets(&request)
			if err != nil {
				t.Error(err)
			}
			if response == nil {
				t.Error("Offset request got no response!")
			}
		}},
}
Some files were not shown because too many files have changed in this diff.