FreeBSD compatibility

- Use gopsutils instead of gosigar
- Bump go-dockerclient

closes #372

parent b10b186cc8
commit 375045953f
@@ -8,6 +8,7 @@ changed to just run docker commands in the Makefile. See `make docker-run` and
 - Redis plugin tag has changed from `host` to `server`
 - HAProxy plugin tag has changed from `host` to `server`
 - UDP output now supported
+- Telegraf will now compile on FreeBSD

 ### Features
 - [#325](https://github.com/influxdb/telegraf/pull/325): NSQ output. Thanks @jrxFive!
@@ -18,6 +19,7 @@ changed to just run docker commands in the Makefile. See `make docker-run` and
 - [#365](https://github.com/influxdb/telegraf/pull/365): Twemproxy plugin by @codeb2cc
 - [#317](https://github.com/influxdb/telegraf/issues/317): ZFS plugin, thanks @cornerot!
 - [#364](https://github.com/influxdb/telegraf/pull/364): Support InfluxDB UDP output.
+- [#372](https://github.com/influxdb/telegraf/pull/372): Remove gosigar and update go-dockerclient for FreeBSD support. Thanks @MerlinDMC!

 ### Bugfixes
 - [#331](https://github.com/influxdb/telegraf/pull/331): Dont overwrite host tag in redis plugin.
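For the UDP output feature referenced in #364 above, a rough idea of what enabling it could look like in a telegraf.conf of this era; the section and key names below are assumptions for illustration, not taken from this commit:

```toml
# hypothetical telegraf.conf excerpt (names and port are assumed, not from this commit)
[outputs]
[outputs.influxdb]
  # a udp:// URL would select the UDP writer added by #364; 8089 is just an example port
  urls = ["udp://localhost:8089"]
  database = "telegraf"
```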
@@ -39,11 +39,6 @@
 		"ImportPath": "github.com/cenkalti/backoff",
 		"Rev": "4dc77674aceaabba2c7e3da25d4c823edfb73f99"
 	},
-	{
-		"ImportPath": "github.com/cloudfoundry/gosigar",
-		"Comment": "scotty_09012012-27-g3ed7c74",
-		"Rev": "3ed7c74352dae6dc00bdc8c74045375352e3ec05"
-	},
 	{
 		"ImportPath": "github.com/dancannon/gorethink/encoding",
 		"Comment": "v1.x.x-1-g786f12a",
@@ -71,7 +66,7 @@
 	},
 	{
 		"ImportPath": "github.com/fsouza/go-dockerclient",
-		"Rev": "af9789bbd78acf3e279274caa54682185eb7ed33"
+		"Rev": "ef410296f87750305e1e1acf9ad2ba3833dcb004"
 	},
 	{
 		"ImportPath": "github.com/go-sql-driver/mysql",
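The hunks above drop the gosigar entry and pin go-dockerclient to a newer revision. As a sketch of how such a Godeps change is typically produced with the godep tool of that era; the exact commands are an assumption, not taken from this commit:

```sh
# hypothetical godep workflow -- not taken from this commit
go get -u github.com/fsouza/go-dockerclient    # fetch the newer revision into GOPATH
godep update github.com/fsouza/go-dockerclient # record its new Rev in Godeps.json
godep save ./...                               # re-vendor; deps no longer imported (gosigar) drop out of _workspace
```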
@@ -1 +0,0 @@
-.vagrant
@@ -1,8 +0,0 @@
-language: go
-
-go:
-- 1.2
-
-install:
-- 'go install github.com/onsi/ginkgo/ginkgo'
-script: 'ginkgo -r'
@@ -1,201 +0,0 @@
[deleted file: full text of the Apache License, Version 2.0 (January 2004), 201 lines]
@@ -1,9 +0,0 @@
-Copyright (c) [2009-2011] VMware, Inc. All Rights Reserved.
-
-This product is licensed to you under the Apache License, Version 2.0 (the "License").
-You may not use this product except in compliance with the License.
-
-This product includes a number of subcomponents with
-separate copyright notices and license terms. Your use of these
-subcomponents is subject to the terms and conditions of the
-subcomponent's license, as noted in the LICENSE file.
@@ -1,22 +0,0 @@
-# Go sigar
-
-## Overview
-
-Go sigar is a golang implementation of the
-[sigar API](https://github.com/hyperic/sigar). The Go version of
-sigar has a very similar interface, but is being written from scratch
-in pure go/cgo, rather than cgo bindings for libsigar.
-
-## Test drive
-
-    $ go get github.com/cloudfoundry/gosigar
-    $ cd $GOPATH/src/github.com/cloudfoundry/gosigar/examples
-    $ go run uptime.go
-
-## Supported platforms
-
-Currently targeting modern flavors of darwin and linux.
-
-## License
-
-Apache 2.0
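Since the README above only points at the examples directory, here is a minimal sketch of the interface this commit removes, using only names that appear in `concrete_sigar.go` further down in this diff; the program itself is illustrative, not part of the commit:

```go
package main

import (
	"fmt"

	sigar "github.com/cloudfoundry/gosigar"
)

func main() {
	s := sigar.ConcreteSigar{}

	// Memory totals as reported by gosigar.
	mem, err := s.GetMem()
	if err != nil {
		fmt.Println("mem:", err)
	} else {
		fmt.Printf("mem total=%d used=%d free=%d\n", mem.Total, mem.Used, mem.Free)
	}

	// 1, 5 and 15 minute load averages.
	avg, err := s.GetLoadAverage()
	if err != nil {
		fmt.Println("load:", err)
	} else {
		fmt.Printf("load %.2f %.2f %.2f\n", avg.One, avg.Five, avg.Fifteen)
	}
}
```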
@@ -1,25 +0,0 @@
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
-  config.vm.box = "hashicorp/precise64"
-  config.vm.provision "shell", inline: "mkdir -p /home/vagrant/go"
-  config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/cloudfoundry/gosigar"
-  config.vm.provision "shell", inline: "chown -R vagrant:vagrant /home/vagrant/go"
-  install_go = <<-BASH
-  set -e
-
-  if [ ! -d "/usr/local/go" ]; then
-    cd /tmp && wget https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz
-    cd /usr/local
-    tar xvzf /tmp/go1.3.3.linux-amd64.tar.gz
-    echo 'export GOPATH=/home/vagrant/go; export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin' >> /home/vagrant/.bashrc
-  fi
-  export GOPATH=/home/vagrant/go
-  export PATH=/usr/local/go/bin:$PATH:$GOPATH/bin
-  /usr/local/go/bin/go get -u github.com/onsi/ginkgo/ginkgo
-  /usr/local/go/bin/go get -u github.com/onsi/gomega;
-  BASH
-  config.vm.provision "shell", inline: 'apt-get install -y git-core'
-  config.vm.provision "shell", inline: install_go
-end
@ -1,69 +0,0 @@
|
|||
package sigar
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type ConcreteSigar struct{}
|
||||
|
||||
func (c *ConcreteSigar) CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{}) {
|
||||
// samplesCh is buffered to 1 value to immediately return first CPU sample
|
||||
samplesCh := make(chan Cpu, 1)
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
var cpuUsage Cpu
|
||||
|
||||
// Immediately provide non-delta value.
|
||||
// samplesCh is buffered to 1 value, so it will not block.
|
||||
cpuUsage.Get()
|
||||
samplesCh <- cpuUsage
|
||||
|
||||
ticker := time.NewTicker(collectionInterval)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
previousCpuUsage := cpuUsage
|
||||
|
||||
cpuUsage.Get()
|
||||
|
||||
select {
|
||||
case samplesCh <- cpuUsage.Delta(previousCpuUsage):
|
||||
default:
|
||||
// Include default to avoid channel blocking
|
||||
}
|
||||
|
||||
case <-stopCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return samplesCh, stopCh
|
||||
}
|
||||
|
||||
func (c *ConcreteSigar) GetLoadAverage() (LoadAverage, error) {
|
||||
l := LoadAverage{}
|
||||
err := l.Get()
|
||||
return l, err
|
||||
}
|
||||
|
||||
func (c *ConcreteSigar) GetMem() (Mem, error) {
|
||||
m := Mem{}
|
||||
err := m.Get()
|
||||
return m, err
|
||||
}
|
||||
|
||||
func (c *ConcreteSigar) GetSwap() (Swap, error) {
|
||||
s := Swap{}
|
||||
err := s.Get()
|
||||
return s, err
|
||||
}
|
||||
|
||||
func (c *ConcreteSigar) GetFileSystemUsage(path string) (FileSystemUsage, error) {
|
||||
f := FileSystemUsage{}
|
||||
err := f.Get(path)
|
||||
return f, err
|
||||
}
|
|
@ -1,85 +0,0 @@
|
|||
package sigar_test
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
sigar "github.com/cloudfoundry/gosigar"
|
||||
)
|
||||
|
||||
var _ = Describe("ConcreteSigar", func() {
|
||||
var concreteSigar *sigar.ConcreteSigar
|
||||
|
||||
BeforeEach(func() {
|
||||
concreteSigar = &sigar.ConcreteSigar{}
|
||||
})
|
||||
|
||||
Describe("CollectCpuStats", func() {
|
||||
It("immediately makes first CPU usage available even though it's not very accurate", func() {
|
||||
samplesCh, stop := concreteSigar.CollectCpuStats(500 * time.Millisecond)
|
||||
|
||||
firstValue := <-samplesCh
|
||||
Expect(firstValue.User).To(BeNumerically(">", 0))
|
||||
|
||||
stop <- struct{}{}
|
||||
})
|
||||
|
||||
It("makes CPU usage delta values available", func() {
|
||||
samplesCh, stop := concreteSigar.CollectCpuStats(500 * time.Millisecond)
|
||||
|
||||
firstValue := <-samplesCh
|
||||
|
||||
secondValue := <-samplesCh
|
||||
Expect(secondValue.User).To(BeNumerically("<", firstValue.User))
|
||||
|
||||
stop <- struct{}{}
|
||||
})
|
||||
|
||||
It("does not block", func() {
|
||||
_, stop := concreteSigar.CollectCpuStats(10 * time.Millisecond)
|
||||
|
||||
// Sleep long enough for samplesCh to fill at least 2 values
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
|
||||
stop <- struct{}{}
|
||||
|
||||
// If CollectCpuStats blocks it will never get here
|
||||
Expect(true).To(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
It("GetLoadAverage", func() {
|
||||
avg, err := concreteSigar.GetLoadAverage()
|
||||
Expect(avg.One).ToNot(BeNil())
|
||||
Expect(avg.Five).ToNot(BeNil())
|
||||
Expect(avg.Fifteen).ToNot(BeNil())
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("GetMem", func() {
|
||||
mem, err := concreteSigar.GetMem()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(mem.Total).To(BeNumerically(">", 0))
|
||||
Expect(mem.Used + mem.Free).To(BeNumerically("<=", mem.Total))
|
||||
})
|
||||
|
||||
It("GetSwap", func() {
|
||||
swap, err := concreteSigar.GetSwap()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(swap.Used + swap.Free).To(BeNumerically("<=", swap.Total))
|
||||
})
|
||||
|
||||
It("GetSwap", func() {
|
||||
fsusage, err := concreteSigar.GetFileSystemUsage("/")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(fsusage.Total).ToNot(BeNil())
|
||||
|
||||
fsusage, err = concreteSigar.GetFileSystemUsage("T O T A L L Y B O G U S")
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(fsusage.Total).To(Equal(uint64(0)))
|
||||
})
|
||||
})
|
|
@ -1,52 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cloudfoundry/gosigar"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cpus := sigar.CpuList{}
|
||||
cpus.Get()
|
||||
tcpu := getOverallCpu(cpus)
|
||||
|
||||
for i, cpu := range cpus.List {
|
||||
fmt.Printf("CPU%d Ticks: %d\n", i, cpu.Total())
|
||||
}
|
||||
|
||||
fmt.Printf("Total CPU Ticks: %d\n", tcpu.Total())
|
||||
fmt.Printf("Total CPU Time: %d\n", tcpu.Total()/128)
|
||||
fmt.Printf("User CPU Time: %d\n", tcpu.User/128)
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
tcpu2 := sigar.Cpu{}
|
||||
tcpu2.Get()
|
||||
|
||||
dcpu := tcpu2.Delta(tcpu)
|
||||
tcpuDelta := tcpu2.Total() - tcpu.Total()
|
||||
iPercentage := 100.0 * float64(dcpu.Idle) / float64(tcpuDelta)
|
||||
fmt.Printf("Idle percentage: %f\n", iPercentage)
|
||||
bPercentage := 100.0 * float64(busy(tcpu2)-busy(tcpu)) / float64(tcpuDelta)
|
||||
fmt.Printf("Busy percentage: %f\n", bPercentage)
|
||||
}
|
||||
|
||||
func busy(c sigar.Cpu) uint64 {
|
||||
return c.Total() - c.Idle
|
||||
}
|
||||
|
||||
func getOverallCpu(cl sigar.CpuList) sigar.Cpu {
|
||||
var overallCpu sigar.Cpu
|
||||
for _, c := range cl.List {
|
||||
overallCpu.User += c.User
|
||||
overallCpu.Nice += c.Nice
|
||||
overallCpu.Sys += c.Sys
|
||||
overallCpu.Idle += c.Idle
|
||||
overallCpu.Wait += c.Wait
|
||||
overallCpu.Irq += c.Irq
|
||||
overallCpu.SoftIrq += c.SoftIrq
|
||||
overallCpu.Stolen += c.Stolen
|
||||
}
|
||||
return overallCpu
|
||||
}
|
|
@ -1,39 +0,0 @@
|
|||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/cloudfoundry/gosigar"
|
||||
"os"
|
||||
)
|
||||
|
||||
const output_format = "%-15s %4s %4s %5s %4s %-15s\n"
|
||||
|
||||
func formatSize(size uint64) string {
|
||||
return sigar.FormatSize(size * 1024)
|
||||
}
|
||||
|
||||
func main() {
|
||||
fslist := sigar.FileSystemList{}
|
||||
fslist.Get()
|
||||
|
||||
fmt.Fprintf(os.Stdout, output_format,
|
||||
"Filesystem", "Size", "Used", "Avail", "Use%", "Mounted on")
|
||||
|
||||
for _, fs := range fslist.List {
|
||||
dir_name := fs.DirName
|
||||
|
||||
usage := sigar.FileSystemUsage{}
|
||||
|
||||
usage.Get(dir_name)
|
||||
|
||||
fmt.Fprintf(os.Stdout, output_format,
|
||||
fs.DevName,
|
||||
formatSize(usage.Total),
|
||||
formatSize(usage.Used),
|
||||
formatSize(usage.Avail),
|
||||
sigar.FormatPercent(usage.UsePercent()),
|
||||
dir_name)
|
||||
}
|
||||
}
|
|
@ -1,33 +0,0 @@
|
|||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/cloudfoundry/gosigar"
|
||||
"os"
|
||||
)
|
||||
|
||||
func format(val uint64) uint64 {
|
||||
return val / 1024
|
||||
}
|
||||
|
||||
func main() {
|
||||
mem := sigar.Mem{}
|
||||
swap := sigar.Swap{}
|
||||
|
||||
mem.Get()
|
||||
swap.Get()
|
||||
|
||||
fmt.Fprintf(os.Stdout, "%18s %10s %10s\n",
|
||||
"total", "used", "free")
|
||||
|
||||
fmt.Fprintf(os.Stdout, "Mem: %10d %10d %10d\n",
|
||||
format(mem.Total), format(mem.Used), format(mem.Free))
|
||||
|
||||
fmt.Fprintf(os.Stdout, "-/+ buffers/cache: %10d %10d\n",
|
||||
format(mem.ActualUsed), format(mem.ActualFree))
|
||||
|
||||
fmt.Fprintf(os.Stdout, "Swap: %10d %10d %10d\n",
|
||||
format(swap.Total), format(swap.Used), format(swap.Free))
|
||||
}
|
|
@ -1,37 +0,0 @@
|
|||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/cloudfoundry/gosigar"
|
||||
)
|
||||
|
||||
func main() {
|
||||
pids := sigar.ProcList{}
|
||||
pids.Get()
|
||||
|
||||
// ps -eo pid,ppid,stime,time,rss,state,comm
|
||||
fmt.Print(" PID PPID STIME TIME RSS S COMMAND\n")
|
||||
|
||||
for _, pid := range pids.List {
|
||||
state := sigar.ProcState{}
|
||||
mem := sigar.ProcMem{}
|
||||
time := sigar.ProcTime{}
|
||||
|
||||
if err := state.Get(pid); err != nil {
|
||||
continue
|
||||
}
|
||||
if err := mem.Get(pid); err != nil {
|
||||
continue
|
||||
}
|
||||
if err := time.Get(pid); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
fmt.Printf("%5d %5d %s %s %6d %c %s\n",
|
||||
pid, state.Ppid,
|
||||
time.FormatStartTime(), time.FormatTotal(),
|
||||
mem.Resident/1024, state.State, state.Name)
|
||||
}
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/cloudfoundry/gosigar"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
concreteSigar := sigar.ConcreteSigar{}
|
||||
|
||||
uptime := sigar.Uptime{}
|
||||
uptime.Get()
|
||||
avg, err := concreteSigar.GetLoadAverage()
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to get load average")
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stdout, " %s up %s load average: %.2f, %.2f, %.2f\n",
|
||||
time.Now().Format("15:04:05"),
|
||||
uptime.Format(),
|
||||
avg.One, avg.Five, avg.Fifteen)
|
||||
}
|
|
@ -1,72 +0,0 @@
|
|||
package fakes
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
sigar "github.com/cloudfoundry/gosigar"
|
||||
)
|
||||
|
||||
type FakeSigar struct {
|
||||
LoadAverage sigar.LoadAverage
|
||||
LoadAverageErr error
|
||||
|
||||
Mem sigar.Mem
|
||||
MemErr error
|
||||
|
||||
Swap sigar.Swap
|
||||
SwapErr error
|
||||
|
||||
FileSystemUsage sigar.FileSystemUsage
|
||||
FileSystemUsageErr error
|
||||
FileSystemUsagePath string
|
||||
|
||||
CollectCpuStatsCpuCh chan sigar.Cpu
|
||||
CollectCpuStatsStopCh chan struct{}
|
||||
}
|
||||
|
||||
func NewFakeSigar() *FakeSigar {
|
||||
return &FakeSigar{
|
||||
CollectCpuStatsCpuCh: make(chan sigar.Cpu, 1),
|
||||
CollectCpuStatsStopCh: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FakeSigar) CollectCpuStats(collectionInterval time.Duration) (<-chan sigar.Cpu, chan<- struct{}) {
|
||||
samplesCh := make(chan sigar.Cpu, 1)
|
||||
stopCh := make(chan struct{})
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case cpuStat := <-f.CollectCpuStatsCpuCh:
|
||||
select {
|
||||
case samplesCh <- cpuStat:
|
||||
default:
|
||||
// Include default to avoid channel blocking
|
||||
}
|
||||
|
||||
case <-f.CollectCpuStatsStopCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return samplesCh, stopCh
|
||||
}
|
||||
|
||||
func (f *FakeSigar) GetLoadAverage() (sigar.LoadAverage, error) {
|
||||
return f.LoadAverage, f.LoadAverageErr
|
||||
}
|
||||
|
||||
func (f *FakeSigar) GetMem() (sigar.Mem, error) {
|
||||
return f.Mem, f.MemErr
|
||||
}
|
||||
|
||||
func (f *FakeSigar) GetSwap() (sigar.Swap, error) {
|
||||
return f.Swap, f.SwapErr
|
||||
}
|
||||
|
||||
func (f *FakeSigar) GetFileSystemUsage(path string) (sigar.FileSystemUsage, error) {
|
||||
f.FileSystemUsagePath = path
|
||||
return f.FileSystemUsage, f.FileSystemUsageErr
|
||||
}
|
|
@@ -1,50 +0,0 @@
-# Process notifications for Go
-
-## Overview
-
-The psnotify package captures process events from the kernel via
-kqueue on Darwin/BSD and the netlink connector on Linux.
-
-The psnotify API is similar to the
-[fsnotify](https://github.com/howeyc/fsnotify) package.
-
-Example:
-```go
-watcher, err := psnotify.NewWatcher()
-if err != nil {
-    log.Fatal(err)
-}
-
-// Process events
-go func() {
-    for {
-        select {
-        case ev := <-watcher.Fork:
-            log.Println("fork event:", ev)
-        case ev := <-watcher.Exec:
-            log.Println("exec event:", ev)
-        case ev := <-watcher.Exit:
-            log.Println("exit event:", ev)
-        case err := <-watcher.Error:
-            log.Println("error:", err)
-        }
-    }
-}()
-
-err = watcher.Watch(os.Getpid(), psnotify.PROC_EVENT_ALL)
-if err != nil {
-    log.Fatal(err)
-}
-
-/* ... do stuff ... */
-watcher.Close()
-```
-
-## Supported platforms
-
-Currently targeting modern flavors of Darwin and Linux.
-Should work on BSD, but untested.
-
-## License
-
-Apache 2.0
@ -1,136 +0,0 @@
|
|||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
package psnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type ProcEventFork struct {
|
||||
ParentPid int // Pid of the process that called fork()
|
||||
ChildPid int // Child process pid created by fork()
|
||||
}
|
||||
|
||||
type ProcEventExec struct {
|
||||
Pid int // Pid of the process that called exec()
|
||||
}
|
||||
|
||||
type ProcEventExit struct {
|
||||
Pid int // Pid of the process that called exit()
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
flags uint32 // Saved value of Watch() flags param
|
||||
}
|
||||
|
||||
type eventListener interface {
|
||||
close() error // Watch.Close() closes the OS specific listener
|
||||
}
|
||||
|
||||
type Watcher struct {
|
||||
listener eventListener // OS specifics (kqueue or netlink)
|
||||
watches map[int]*watch // Map of watched process ids
|
||||
Error chan error // Errors are sent on this channel
|
||||
Fork chan *ProcEventFork // Fork events are sent on this channel
|
||||
Exec chan *ProcEventExec // Exec events are sent on this channel
|
||||
Exit chan *ProcEventExit // Exit events are sent on this channel
|
||||
done chan bool // Used to stop the readEvents() goroutine
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
// Initialize event listener and channels
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
listener, err := createListener()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := &Watcher{
|
||||
listener: listener,
|
||||
watches: make(map[int]*watch),
|
||||
Fork: make(chan *ProcEventFork),
|
||||
Exec: make(chan *ProcEventExec),
|
||||
Exit: make(chan *ProcEventExit),
|
||||
Error: make(chan error),
|
||||
done: make(chan bool, 1),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close event channels when done message is received
|
||||
func (w *Watcher) finish() {
|
||||
close(w.Fork)
|
||||
close(w.Exec)
|
||||
close(w.Exit)
|
||||
close(w.Error)
|
||||
}
|
||||
|
||||
// Closes the OS specific event listener,
|
||||
// removes all watches and closes all event channels.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed {
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
for pid := range w.watches {
|
||||
w.RemoveWatch(pid)
|
||||
}
|
||||
|
||||
w.done <- true
|
||||
|
||||
w.listener.close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add pid to the watched process set.
|
||||
// The flags param is a bitmask of process events to capture,
|
||||
// must be one or more of: PROC_EVENT_FORK, PROC_EVENT_EXEC, PROC_EVENT_EXIT
|
||||
func (w *Watcher) Watch(pid int, flags uint32) error {
|
||||
if w.isClosed {
|
||||
return errors.New("psnotify watcher is closed")
|
||||
}
|
||||
|
||||
watchEntry, found := w.watches[pid]
|
||||
|
||||
if found {
|
||||
watchEntry.flags |= flags
|
||||
} else {
|
||||
if err := w.register(pid, flags); err != nil {
|
||||
return err
|
||||
}
|
||||
w.watches[pid] = &watch{flags: flags}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove pid from the watched process set.
|
||||
func (w *Watcher) RemoveWatch(pid int) error {
|
||||
_, ok := w.watches[pid]
|
||||
if !ok {
|
||||
msg := fmt.Sprintf("watch for pid=%d does not exist", pid)
|
||||
return errors.New(msg)
|
||||
}
|
||||
delete(w.watches, pid)
|
||||
return w.unregister(pid)
|
||||
}
|
||||
|
||||
// Internal helper to check if there is a message on the "done" channel.
|
||||
// The "done" message is sent by the Close() method; when received here,
|
||||
// the Watcher.finish method is called to close all channels and return
|
||||
// true - in which case the caller should break from the readEvents loop.
|
||||
func (w *Watcher) isDone() bool {
|
||||
var done bool
|
||||
select {
|
||||
case done = <-w.done:
|
||||
w.finish()
|
||||
default:
|
||||
}
|
||||
return done
|
||||
}
|
Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_bsd.go (93 lines, generated, vendored)
@ -1,93 +0,0 @@
|
|||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
// +build darwin freebsd netbsd openbsd
|
||||
|
||||
// Go interface to BSD kqueue process events.
|
||||
package psnotify
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
// Flags (from <sys/event.h>)
|
||||
PROC_EVENT_FORK = syscall.NOTE_FORK // fork() events
|
||||
PROC_EVENT_EXEC = syscall.NOTE_EXEC // exec() events
|
||||
PROC_EVENT_EXIT = syscall.NOTE_EXIT // exit() events
|
||||
|
||||
// Watch for all process events
|
||||
PROC_EVENT_ALL = PROC_EVENT_FORK | PROC_EVENT_EXEC | PROC_EVENT_EXIT
|
||||
)
|
||||
|
||||
type kqueueListener struct {
|
||||
kq int // The syscall.Kqueue() file descriptor
|
||||
buf [1]syscall.Kevent_t // An event buffer for Add/Remove watch
|
||||
}
|
||||
|
||||
// Initialize bsd implementation of the eventListener interface
|
||||
func createListener() (eventListener, error) {
|
||||
listener := &kqueueListener{}
|
||||
kq, err := syscall.Kqueue()
|
||||
listener.kq = kq
|
||||
return listener, err
|
||||
}
|
||||
|
||||
// Initialize Kevent_t fields and propagate changelist for the given pid
|
||||
func (w *Watcher) kevent(pid int, fflags uint32, flags int) error {
|
||||
listener, _ := w.listener.(*kqueueListener)
|
||||
event := &listener.buf[0]
|
||||
|
||||
syscall.SetKevent(event, pid, syscall.EVFILT_PROC, flags)
|
||||
event.Fflags = fflags
|
||||
|
||||
_, err := syscall.Kevent(listener.kq, listener.buf[:], nil, nil)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete filter for given pid from the queue
|
||||
func (w *Watcher) unregister(pid int) error {
|
||||
return w.kevent(pid, 0, syscall.EV_DELETE)
|
||||
}
|
||||
|
||||
// Add and enable filter for given pid in the queue
|
||||
func (w *Watcher) register(pid int, flags uint32) error {
|
||||
return w.kevent(pid, flags, syscall.EV_ADD|syscall.EV_ENABLE)
|
||||
}
|
||||
|
||||
// Poll the kqueue file descriptor and dispatch to the Event channels
|
||||
func (w *Watcher) readEvents() {
|
||||
listener, _ := w.listener.(*kqueueListener)
|
||||
events := make([]syscall.Kevent_t, 10)
|
||||
|
||||
for {
|
||||
if w.isDone() {
|
||||
return
|
||||
}
|
||||
|
||||
n, err := syscall.Kevent(listener.kq, nil, events, nil)
|
||||
if err != nil {
|
||||
w.Error <- err
|
||||
continue
|
||||
}
|
||||
|
||||
for _, ev := range events[:n] {
|
||||
pid := int(ev.Ident)
|
||||
|
||||
switch ev.Fflags {
|
||||
case syscall.NOTE_FORK:
|
||||
w.Fork <- &ProcEventFork{ParentPid: pid}
|
||||
case syscall.NOTE_EXEC:
|
||||
w.Exec <- &ProcEventExec{Pid: pid}
|
||||
case syscall.NOTE_EXIT:
|
||||
w.RemoveWatch(pid)
|
||||
w.Exit <- &ProcEventExit{Pid: pid}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close our kqueue file descriptor; deletes any remaining filters
|
||||
func (listener *kqueueListener) close() error {
|
||||
return syscall.Close(listener.kq)
|
||||
}
|
Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_linux.go (253 lines, generated, vendored)
@ -1,253 +0,0 @@
|
|||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
// Go interface to the Linux netlink process connector.
|
||||
// See Documentation/connector/connector.txt in the linux kernel source tree.
|
||||
package psnotify
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
// internal flags (from <linux/connector.h>)
|
||||
_CN_IDX_PROC = 0x1
|
||||
_CN_VAL_PROC = 0x1
|
||||
|
||||
// internal flags (from <linux/cn_proc.h>)
|
||||
_PROC_CN_MCAST_LISTEN = 1
|
||||
_PROC_CN_MCAST_IGNORE = 2
|
||||
|
||||
// Flags (from <linux/cn_proc.h>)
|
||||
PROC_EVENT_FORK = 0x00000001 // fork() events
|
||||
PROC_EVENT_EXEC = 0x00000002 // exec() events
|
||||
PROC_EVENT_EXIT = 0x80000000 // exit() events
|
||||
|
||||
// Watch for all process events
|
||||
PROC_EVENT_ALL = PROC_EVENT_FORK | PROC_EVENT_EXEC | PROC_EVENT_EXIT
|
||||
)
|
||||
|
||||
var (
|
||||
byteOrder = binary.LittleEndian
|
||||
)
|
||||
|
||||
// linux/connector.h: struct cb_id
|
||||
type cbId struct {
|
||||
Idx uint32
|
||||
Val uint32
|
||||
}
|
||||
|
||||
// linux/connector.h: struct cb_msg
|
||||
type cnMsg struct {
|
||||
Id cbId
|
||||
Seq uint32
|
||||
Ack uint32
|
||||
Len uint16
|
||||
Flags uint16
|
||||
}
|
||||
|
||||
// linux/cn_proc.h: struct proc_event.{what,cpu,timestamp_ns}
|
||||
type procEventHeader struct {
|
||||
What uint32
|
||||
Cpu uint32
|
||||
Timestamp uint64
|
||||
}
|
||||
|
||||
// linux/cn_proc.h: struct proc_event.fork
|
||||
type forkProcEvent struct {
|
||||
ParentPid uint32
|
||||
ParentTgid uint32
|
||||
ChildPid uint32
|
||||
ChildTgid uint32
|
||||
}
|
||||
|
||||
// linux/cn_proc.h: struct proc_event.exec
|
||||
type execProcEvent struct {
|
||||
ProcessPid uint32
|
||||
ProcessTgid uint32
|
||||
}
|
||||
|
||||
// linux/cn_proc.h: struct proc_event.exit
|
||||
type exitProcEvent struct {
|
||||
ProcessPid uint32
|
||||
ProcessTgid uint32
|
||||
ExitCode uint32
|
||||
ExitSignal uint32
|
||||
}
|
||||
|
||||
// standard netlink header + connector header
|
||||
type netlinkProcMessage struct {
|
||||
Header syscall.NlMsghdr
|
||||
Data cnMsg
|
||||
}
|
||||
|
||||
type netlinkListener struct {
|
||||
addr *syscall.SockaddrNetlink // Netlink socket address
|
||||
sock int // The syscall.Socket() file descriptor
|
||||
seq uint32 // struct cn_msg.seq
|
||||
}
|
||||
|
||||
// Initialize linux implementation of the eventListener interface
|
||||
func createListener() (eventListener, error) {
|
||||
listener := &netlinkListener{}
|
||||
err := listener.bind()
|
||||
return listener, err
|
||||
}
|
||||
|
||||
// noop on linux
|
||||
func (w *Watcher) unregister(pid int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// noop on linux
|
||||
func (w *Watcher) register(pid int, flags uint32) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read events from the netlink socket
|
||||
func (w *Watcher) readEvents() {
|
||||
buf := make([]byte, syscall.Getpagesize())
|
||||
|
||||
listener, _ := w.listener.(*netlinkListener)
|
||||
|
||||
for {
|
||||
if w.isDone() {
|
||||
return
|
||||
}
|
||||
|
||||
nr, _, err := syscall.Recvfrom(listener.sock, buf, 0)
|
||||
|
||||
if err != nil {
|
||||
w.Error <- err
|
||||
continue
|
||||
}
|
||||
if nr < syscall.NLMSG_HDRLEN {
|
||||
w.Error <- syscall.EINVAL
|
||||
continue
|
||||
}
|
||||
|
||||
msgs, _ := syscall.ParseNetlinkMessage(buf[:nr])
|
||||
|
||||
for _, m := range msgs {
|
||||
if m.Header.Type == syscall.NLMSG_DONE {
|
||||
w.handleEvent(m.Data)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Internal helper to check if pid && event is being watched
|
||||
func (w *Watcher) isWatching(pid int, event uint32) bool {
|
||||
if watch, ok := w.watches[pid]; ok {
|
||||
return (watch.flags & event) == event
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Dispatch events from the netlink socket to the Event channels.
|
||||
// Unlike bsd kqueue, netlink receives events for all pids,
|
||||
// so we apply filtering based on the watch table via isWatching()
|
||||
func (w *Watcher) handleEvent(data []byte) {
|
||||
buf := bytes.NewBuffer(data)
|
||||
msg := &cnMsg{}
|
||||
hdr := &procEventHeader{}
|
||||
|
||||
binary.Read(buf, byteOrder, msg)
|
||||
binary.Read(buf, byteOrder, hdr)
|
||||
|
||||
switch hdr.What {
|
||||
case PROC_EVENT_FORK:
|
||||
event := &forkProcEvent{}
|
||||
binary.Read(buf, byteOrder, event)
|
||||
ppid := int(event.ParentTgid)
|
||||
pid := int(event.ChildTgid)
|
||||
|
||||
if w.isWatching(ppid, PROC_EVENT_EXEC) {
|
||||
// follow forks
|
||||
watch, _ := w.watches[ppid]
|
||||
w.Watch(pid, watch.flags)
|
||||
}
|
||||
|
||||
if w.isWatching(ppid, PROC_EVENT_FORK) {
|
||||
w.Fork <- &ProcEventFork{ParentPid: ppid, ChildPid: pid}
|
||||
}
|
||||
case PROC_EVENT_EXEC:
|
||||
event := &execProcEvent{}
|
||||
binary.Read(buf, byteOrder, event)
|
||||
pid := int(event.ProcessTgid)
|
||||
|
||||
if w.isWatching(pid, PROC_EVENT_EXEC) {
|
||||
w.Exec <- &ProcEventExec{Pid: pid}
|
||||
}
|
||||
case PROC_EVENT_EXIT:
|
||||
event := &exitProcEvent{}
|
||||
binary.Read(buf, byteOrder, event)
|
||||
pid := int(event.ProcessTgid)
|
||||
|
||||
if w.isWatching(pid, PROC_EVENT_EXIT) {
|
||||
w.RemoveWatch(pid)
|
||||
w.Exit <- &ProcEventExit{Pid: pid}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Bind our netlink socket and
|
||||
// send a listen control message to the connector driver.
|
||||
func (listener *netlinkListener) bind() error {
|
||||
sock, err := syscall.Socket(
|
||||
syscall.AF_NETLINK,
|
||||
syscall.SOCK_DGRAM,
|
||||
syscall.NETLINK_CONNECTOR)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
listener.sock = sock
|
||||
listener.addr = &syscall.SockaddrNetlink{
|
||||
Family: syscall.AF_NETLINK,
|
||||
Groups: _CN_IDX_PROC,
|
||||
}
|
||||
|
||||
err = syscall.Bind(listener.sock, listener.addr)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return listener.send(_PROC_CN_MCAST_LISTEN)
|
||||
}
|
||||
|
||||
// Send an ignore control message to the connector driver
|
||||
// and close our netlink socket.
|
||||
func (listener *netlinkListener) close() error {
|
||||
err := listener.send(_PROC_CN_MCAST_IGNORE)
|
||||
syscall.Close(listener.sock)
|
||||
return err
|
||||
}
|
||||
|
||||
// Generic method for sending control messages to the connector
|
||||
// driver; where op is one of PROC_CN_MCAST_{LISTEN,IGNORE}
|
||||
func (listener *netlinkListener) send(op uint32) error {
|
||||
listener.seq++
|
||||
pr := &netlinkProcMessage{}
|
||||
plen := binary.Size(pr.Data) + binary.Size(op)
|
||||
pr.Header.Len = syscall.NLMSG_HDRLEN + uint32(plen)
|
||||
pr.Header.Type = uint16(syscall.NLMSG_DONE)
|
||||
pr.Header.Flags = 0
|
||||
pr.Header.Seq = listener.seq
|
||||
pr.Header.Pid = uint32(os.Getpid())
|
||||
|
||||
pr.Data.Id.Idx = _CN_IDX_PROC
|
||||
pr.Data.Id.Val = _CN_VAL_PROC
|
||||
|
||||
pr.Data.Len = uint16(binary.Size(op))
|
||||
|
||||
buf := bytes.NewBuffer(make([]byte, 0, pr.Header.Len))
|
||||
binary.Write(buf, byteOrder, pr)
|
||||
binary.Write(buf, byteOrder, op)
|
||||
|
||||
return syscall.Sendto(listener.sock, buf.Bytes(), 0, listener.addr)
|
||||
}
|
Godeps/_workspace/src/github.com/cloudfoundry/gosigar/psnotify/psnotify_test.go (283 lines, generated, vendored)
@ -1,283 +0,0 @@
|
|||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
package psnotify
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type anyEvent struct {
|
||||
exits []int
|
||||
forks []int
|
||||
execs []int
|
||||
errors []error
|
||||
done chan bool
|
||||
}
|
||||
|
||||
type testWatcher struct {
|
||||
t *testing.T
|
||||
watcher *Watcher
|
||||
events *anyEvent
|
||||
}
|
||||
|
||||
// General purpose Watcher wrapper for all tests
|
||||
func newTestWatcher(t *testing.T) *testWatcher {
|
||||
watcher, err := NewWatcher()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
events := &anyEvent{
|
||||
done: make(chan bool, 1),
|
||||
}
|
||||
|
||||
tw := &testWatcher{
|
||||
t: t,
|
||||
watcher: watcher,
|
||||
events: events,
|
||||
}
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-events.done:
|
||||
return
|
||||
case ev := <-watcher.Fork:
|
||||
events.forks = append(events.forks, ev.ParentPid)
|
||||
case ev := <-watcher.Exec:
|
||||
events.execs = append(events.execs, ev.Pid)
|
||||
case ev := <-watcher.Exit:
|
||||
events.exits = append(events.exits, ev.Pid)
|
||||
case err := <-watcher.Error:
|
||||
events.errors = append(events.errors, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return tw
|
||||
}
|
||||
|
||||
func (tw *testWatcher) close() {
|
||||
pause := 100 * time.Millisecond
|
||||
time.Sleep(pause)
|
||||
|
||||
tw.events.done <- true
|
||||
|
||||
tw.watcher.Close()
|
||||
|
||||
time.Sleep(pause)
|
||||
}
|
||||
|
||||
func skipTest(t *testing.T) bool {
|
||||
if runtime.GOOS == "linux" && os.Getuid() != 0 {
|
||||
fmt.Println("SKIP: test must be run as root on linux")
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func startSleepCommand(t *testing.T) *exec.Cmd {
|
||||
cmd := exec.Command("sh", "-c", "sleep 100")
|
||||
if err := cmd.Start(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runCommand(t *testing.T, name string) *exec.Cmd {
|
||||
cmd := exec.Command(name)
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func expectEvents(t *testing.T, num int, name string, pids []int) bool {
|
||||
if len(pids) != num {
|
||||
t.Errorf("Expected %d %s events, got=%v", num, name, pids)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func expectEventPid(t *testing.T, name string, expect int, pid int) bool {
|
||||
if expect != pid {
|
||||
t.Errorf("Expected %s pid=%d, received=%d", name, expect, pid)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func TestWatchFork(t *testing.T) {
|
||||
if skipTest(t) {
|
||||
return
|
||||
}
|
||||
|
||||
pid := os.Getpid()
|
||||
|
||||
tw := newTestWatcher(t)
|
||||
|
||||
// no watches added yet, so this fork event will no be captured
|
||||
runCommand(t, "date")
|
||||
|
||||
// watch fork events for this process
|
||||
if err := tw.watcher.Watch(pid, PROC_EVENT_FORK); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// this fork event will be captured,
|
||||
// the exec and exit events will not be captured
|
||||
runCommand(t, "cal")
|
||||
|
||||
tw.close()
|
||||
|
||||
if expectEvents(t, 1, "forks", tw.events.forks) {
|
||||
expectEventPid(t, "fork", pid, tw.events.forks[0])
|
||||
}
|
||||
|
||||
expectEvents(t, 0, "execs", tw.events.execs)
|
||||
expectEvents(t, 0, "exits", tw.events.exits)
|
||||
}
|
||||
|
||||
func TestWatchExit(t *testing.T) {
|
||||
if skipTest(t) {
|
||||
return
|
||||
}
|
||||
|
||||
tw := newTestWatcher(t)
|
||||
|
||||
cmd := startSleepCommand(t)
|
||||
|
||||
childPid := cmd.Process.Pid
|
||||
|
||||
// watch for exit event of our child process
|
||||
if err := tw.watcher.Watch(childPid, PROC_EVENT_EXIT); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// kill our child process, triggers exit event
|
||||
syscall.Kill(childPid, syscall.SIGTERM)
|
||||
|
||||
cmd.Wait()
|
||||
|
||||
tw.close()
|
||||
|
||||
expectEvents(t, 0, "forks", tw.events.forks)
|
||||
|
||||
expectEvents(t, 0, "execs", tw.events.execs)
|
||||
|
||||
if expectEvents(t, 1, "exits", tw.events.exits) {
|
||||
expectEventPid(t, "exit", childPid, tw.events.exits[0])
|
||||
}
|
||||
}
|
||||
|
||||
// combined version of TestWatchFork() and TestWatchExit()
|
||||
func TestWatchForkAndExit(t *testing.T) {
|
||||
if skipTest(t) {
|
||||
return
|
||||
}
|
||||
|
||||
pid := os.Getpid()
|
||||
|
||||
tw := newTestWatcher(t)
|
||||
|
||||
if err := tw.watcher.Watch(pid, PROC_EVENT_FORK); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
cmd := startSleepCommand(t)
|
||||
|
||||
childPid := cmd.Process.Pid
|
||||
|
||||
if err := tw.watcher.Watch(childPid, PROC_EVENT_EXIT); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
syscall.Kill(childPid, syscall.SIGTERM)
|
||||
|
||||
cmd.Wait()
|
||||
|
||||
tw.close()
|
||||
|
||||
if expectEvents(t, 1, "forks", tw.events.forks) {
|
||||
expectEventPid(t, "fork", pid, tw.events.forks[0])
|
||||
}
|
||||
|
||||
expectEvents(t, 0, "execs", tw.events.execs)
|
||||
|
||||
if expectEvents(t, 1, "exits", tw.events.exits) {
|
||||
expectEventPid(t, "exit", childPid, tw.events.exits[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestWatchFollowFork(t *testing.T) {
|
||||
if skipTest(t) {
|
||||
return
|
||||
}
|
||||
|
||||
// Darwin is not able to follow forks, as the kqueue fork event
|
||||
// does not provide the child pid.
|
||||
if runtime.GOOS != "linux" {
|
||||
fmt.Println("SKIP: test follow forks is linux only")
|
||||
return
|
||||
}
|
||||
|
||||
pid := os.Getpid()
|
||||
|
||||
tw := newTestWatcher(t)
|
||||
|
||||
// watch for all process events related to this process
|
||||
if err := tw.watcher.Watch(pid, PROC_EVENT_ALL); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
commands := []string{"date", "cal"}
|
||||
childPids := make([]int, len(commands))
|
||||
|
||||
// triggers fork/exec/exit events for each command
|
||||
for i, name := range commands {
|
||||
cmd := runCommand(t, name)
|
||||
childPids[i] = cmd.Process.Pid
|
||||
}
|
||||
|
||||
// remove watch for this process
|
||||
tw.watcher.RemoveWatch(pid)
|
||||
|
||||
// run commands again to make sure we don't receive any unwanted events
|
||||
for _, name := range commands {
|
||||
runCommand(t, name)
|
||||
}
|
||||
|
||||
tw.close()
|
||||
|
||||
// run commands again to make sure nothing panics after
|
||||
// closing the watcher
|
||||
for _, name := range commands {
|
||||
runCommand(t, name)
|
||||
}
|
||||
|
||||
num := len(commands)
|
||||
if expectEvents(t, num, "forks", tw.events.forks) {
|
||||
for _, epid := range tw.events.forks {
|
||||
expectEventPid(t, "fork", pid, epid)
|
||||
}
|
||||
}
|
||||
|
||||
if expectEvents(t, num, "execs", tw.events.execs) {
|
||||
for i, epid := range tw.events.execs {
|
||||
expectEventPid(t, "exec", childPids[i], epid)
|
||||
}
|
||||
}
|
||||
|
||||
if expectEvents(t, num, "exits", tw.events.exits) {
|
||||
for i, epid := range tw.events.exits {
|
||||
expectEventPid(t, "exit", childPids[i], epid)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,467 +0,0 @@
|
|||
// Copyright (c) 2012 VMware, Inc.
|
||||
|
||||
package sigar
|
||||
|
||||
/*
|
||||
#include <stdlib.h>
|
||||
#include <sys/sysctl.h>
|
||||
#include <sys/mount.h>
|
||||
#include <mach/mach_init.h>
|
||||
#include <mach/mach_host.h>
|
||||
#include <mach/host_info.h>
|
||||
#include <libproc.h>
|
||||
#include <mach/processor_info.h>
|
||||
#include <mach/vm_map.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func (self *LoadAverage) Get() error {
|
||||
avg := []C.double{0, 0, 0}
|
||||
|
||||
C.getloadavg(&avg[0], C.int(len(avg)))
|
||||
|
||||
self.One = float64(avg[0])
|
||||
self.Five = float64(avg[1])
|
||||
self.Fifteen = float64(avg[2])
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Uptime) Get() error {
|
||||
tv := syscall.Timeval32{}
|
||||
|
||||
if err := sysctlbyname("kern.boottime", &tv); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.Length = time.Since(time.Unix(int64(tv.Sec), int64(tv.Usec)*1000)).Seconds()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Mem) Get() error {
|
||||
var vmstat C.vm_statistics_data_t
|
||||
|
||||
if err := sysctlbyname("hw.memsize", &self.Total); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := vm_info(&vmstat); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
kern := uint64(vmstat.inactive_count) << 12
|
||||
self.Free = uint64(vmstat.free_count) << 12
|
||||
|
||||
self.Used = self.Total - self.Free
|
||||
self.ActualFree = self.Free + kern
|
||||
self.ActualUsed = self.Used - kern
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type xsw_usage struct {
|
||||
Total, Avail, Used uint64
|
||||
}
|
||||
|
||||
func (self *Swap) Get() error {
|
||||
sw_usage := xsw_usage{}
|
||||
|
||||
if err := sysctlbyname("vm.swapusage", &sw_usage); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
self.Total = sw_usage.Total
|
||||
self.Used = sw_usage.Used
|
||||
self.Free = sw_usage.Avail
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *Cpu) Get() error {
|
||||
var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT
|
||||
var cpuload C.host_cpu_load_info_data_t
|
||||
|
||||
status := C.host_statistics(C.host_t(C.mach_host_self()),
|
||||
C.HOST_CPU_LOAD_INFO,
|
||||
C.host_info_t(unsafe.Pointer(&cpuload)),
|
||||
&count)
|
||||
|
||||
if status != C.KERN_SUCCESS {
|
||||
return fmt.Errorf("host_statistics error=%d", status)
|
||||
}
|
||||
|
||||
self.User = uint64(cpuload.cpu_ticks[C.CPU_STATE_USER])
|
||||
	self.Sys = uint64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM])
	self.Idle = uint64(cpuload.cpu_ticks[C.CPU_STATE_IDLE])
	self.Nice = uint64(cpuload.cpu_ticks[C.CPU_STATE_NICE])

	return nil
}

func (self *CpuList) Get() error {
	var count C.mach_msg_type_number_t
	var cpuload *C.processor_cpu_load_info_data_t
	var ncpu C.natural_t

	status := C.host_processor_info(C.host_t(C.mach_host_self()),
		C.PROCESSOR_CPU_LOAD_INFO,
		&ncpu,
		(*C.processor_info_array_t)(unsafe.Pointer(&cpuload)),
		&count)

	if status != C.KERN_SUCCESS {
		return fmt.Errorf("host_processor_info error=%d", status)
	}

	// jump through some cgo casting hoops and ensure we properly free
	// the memory that cpuload points to
	target := C.vm_map_t(C.mach_task_self_)
	address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload)))
	defer C.vm_deallocate(target, address, C.vm_size_t(ncpu))

	// the body of struct processor_cpu_load_info
	// aka processor_cpu_load_info_data_t
	var cpu_ticks [C.CPU_STATE_MAX]uint32

	// copy the cpuload array to a []byte buffer
	// where we can binary.Read the data
	size := int(ncpu) * binary.Size(cpu_ticks)
	buf := C.GoBytes(unsafe.Pointer(cpuload), C.int(size))

	bbuf := bytes.NewBuffer(buf)

	self.List = make([]Cpu, 0, ncpu)

	for i := 0; i < int(ncpu); i++ {
		cpu := Cpu{}

		err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks)
		if err != nil {
			return err
		}

		cpu.User = uint64(cpu_ticks[C.CPU_STATE_USER])
		cpu.Sys = uint64(cpu_ticks[C.CPU_STATE_SYSTEM])
		cpu.Idle = uint64(cpu_ticks[C.CPU_STATE_IDLE])
		cpu.Nice = uint64(cpu_ticks[C.CPU_STATE_NICE])

		self.List = append(self.List, cpu)
	}

	return nil
}

func (self *FileSystemList) Get() error {
	num, err := getfsstat(nil, C.MNT_NOWAIT)
	if num < 0 {
		return err
	}

	buf := make([]syscall.Statfs_t, num)

	num, err = getfsstat(buf, C.MNT_NOWAIT)
	if err != nil {
		return err
	}

	fslist := make([]FileSystem, 0, num)

	for i := 0; i < num; i++ {
		fs := FileSystem{}

		fs.DirName = bytePtrToString(&buf[i].Mntonname[0])
		fs.DevName = bytePtrToString(&buf[i].Mntfromname[0])
		fs.SysTypeName = bytePtrToString(&buf[i].Fstypename[0])

		fslist = append(fslist, fs)
	}

	self.List = fslist

	return err
}

func (self *ProcList) Get() error {
	n := C.proc_listpids(C.PROC_ALL_PIDS, 0, nil, 0)
	if n <= 0 {
		return syscall.EINVAL
	}
	buf := make([]byte, n)
	n = C.proc_listpids(C.PROC_ALL_PIDS, 0, unsafe.Pointer(&buf[0]), n)
	if n <= 0 {
		return syscall.ENOMEM
	}

	var pid int32
	num := int(n) / binary.Size(pid)
	list := make([]int, 0, num)
	bbuf := bytes.NewBuffer(buf)

	for i := 0; i < num; i++ {
		if err := binary.Read(bbuf, binary.LittleEndian, &pid); err != nil {
			return err
		}
		if pid == 0 {
			continue
		}

		list = append(list, int(pid))
	}

	self.List = list

	return nil
}

func (self *ProcState) Get(pid int) error {
	info := C.struct_proc_taskallinfo{}

	if err := task_info(pid, &info); err != nil {
		return err
	}

	self.Name = C.GoString(&info.pbsd.pbi_comm[0])

	switch info.pbsd.pbi_status {
	case C.SIDL:
		self.State = RunStateIdle
	case C.SRUN:
		self.State = RunStateRun
	case C.SSLEEP:
		self.State = RunStateSleep
	case C.SSTOP:
		self.State = RunStateStop
	case C.SZOMB:
		self.State = RunStateZombie
	default:
		self.State = RunStateUnknown
	}

	self.Ppid = int(info.pbsd.pbi_ppid)

	self.Tty = int(info.pbsd.e_tdev)

	self.Priority = int(info.ptinfo.pti_priority)

	self.Nice = int(info.pbsd.pbi_nice)

	return nil
}

func (self *ProcMem) Get(pid int) error {
	info := C.struct_proc_taskallinfo{}

	if err := task_info(pid, &info); err != nil {
		return err
	}

	self.Size = uint64(info.ptinfo.pti_virtual_size)
	self.Resident = uint64(info.ptinfo.pti_resident_size)
	self.PageFaults = uint64(info.ptinfo.pti_faults)

	return nil
}

func (self *ProcTime) Get(pid int) error {
	info := C.struct_proc_taskallinfo{}

	if err := task_info(pid, &info); err != nil {
		return err
	}

	self.User =
		uint64(info.ptinfo.pti_total_user) / uint64(time.Millisecond)

	self.Sys =
		uint64(info.ptinfo.pti_total_system) / uint64(time.Millisecond)

	self.Total = self.User + self.Sys

	self.StartTime = (uint64(info.pbsd.pbi_start_tvsec) * 1000) +
		(uint64(info.pbsd.pbi_start_tvusec) / 1000)

	return nil
}

func (self *ProcArgs) Get(pid int) error {
	var args []string

	argv := func(arg string) {
		args = append(args, arg)
	}

	err := kern_procargs(pid, nil, argv, nil)

	self.List = args

	return err
}

func (self *ProcExe) Get(pid int) error {
	exe := func(arg string) {
		self.Name = arg
	}

	return kern_procargs(pid, exe, nil, nil)
}

// wrapper around sysctl KERN_PROCARGS2
// callbacks params are optional,
// up to the caller as to which pieces of data they want
func kern_procargs(pid int,
	exe func(string),
	argv func(string),
	env func(string, string)) error {

	mib := []C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)}
	argmax := uintptr(C.ARG_MAX)
	buf := make([]byte, argmax)
	err := sysctl(mib, &buf[0], &argmax, nil, 0)
	if err != nil {
		return nil
	}

	bbuf := bytes.NewBuffer(buf)
	bbuf.Truncate(int(argmax))

	var argc int32
	binary.Read(bbuf, binary.LittleEndian, &argc)

	path, err := bbuf.ReadBytes(0)
	if exe != nil {
		exe(string(chop(path)))
	}

	// skip trailing \0's
	for {
		c, _ := bbuf.ReadByte()
		if c != 0 {
			bbuf.UnreadByte()
			break // start of argv[0]
		}
	}

	for i := 0; i < int(argc); i++ {
		arg, err := bbuf.ReadBytes(0)
		if err == io.EOF {
			break
		}
		if argv != nil {
			argv(string(chop(arg)))
		}
	}

	if env == nil {
		return nil
	}

	delim := []byte{61} // "="

	for {
		line, err := bbuf.ReadBytes(0)
		if err == io.EOF || line[0] == 0 {
			break
		}
		pair := bytes.SplitN(chop(line), delim, 2)
		env(string(pair[0]), string(pair[1]))
	}

	return nil
}

// XXX copied from zsyscall_darwin_amd64.go
func sysctl(mib []C.int, old *byte, oldlen *uintptr,
	new *byte, newlen uintptr) (err error) {
	var p0 unsafe.Pointer
	p0 = unsafe.Pointer(&mib[0])
	_, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(p0),
		uintptr(len(mib)),
		uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)),
		uintptr(unsafe.Pointer(new)), uintptr(newlen))
	if e1 != 0 {
		err = e1
	}
	return
}

func vm_info(vmstat *C.vm_statistics_data_t) error {
	var count C.mach_msg_type_number_t = C.HOST_VM_INFO_COUNT

	status := C.host_statistics(
		C.host_t(C.mach_host_self()),
		C.HOST_VM_INFO,
		C.host_info_t(unsafe.Pointer(vmstat)),
		&count)

	if status != C.KERN_SUCCESS {
		return fmt.Errorf("host_statistics=%d", status)
	}

	return nil
}

// generic Sysctl buffer unmarshalling
func sysctlbyname(name string, data interface{}) (err error) {
	val, err := syscall.Sysctl(name)
	if err != nil {
		return err
	}

	buf := []byte(val)

	switch v := data.(type) {
	case *uint64:
		*v = *(*uint64)(unsafe.Pointer(&buf[0]))
		return
	}

	bbuf := bytes.NewBuffer([]byte(val))
	return binary.Read(bbuf, binary.LittleEndian, data)
}

// syscall.Getfsstat() wrapper is broken, roll our own to workaround.
func getfsstat(buf []syscall.Statfs_t, flags int) (n int, err error) {
	var ptr uintptr
	var size uintptr

	if len(buf) > 0 {
		ptr = uintptr(unsafe.Pointer(&buf[0]))
		size = unsafe.Sizeof(buf[0]) * uintptr(len(buf))
	} else {
		ptr = uintptr(0)
		size = uintptr(0)
	}

	trap := uintptr(syscall.SYS_GETFSSTAT64)
	ret, _, errno := syscall.Syscall(trap, ptr, size, uintptr(flags))

	n = int(ret)
	if errno != 0 {
		err = errno
	}

	return
}

func task_info(pid int, info *C.struct_proc_taskallinfo) error {
	size := C.int(unsafe.Sizeof(*info))
	ptr := unsafe.Pointer(info)

	n := C.proc_pidinfo(C.int(pid), C.PROC_PIDTASKALLINFO, 0, ptr, size)
	if n != size {
		return syscall.ENOMEM
	}

	return nil
}

@ -1,126 +0,0 @@
// Copyright (c) 2012 VMware, Inc.

package sigar

import (
	"bufio"
	"bytes"
	"fmt"
	"strconv"
	"time"
)

// Go version of apr_strfsize
func FormatSize(size uint64) string {
	ord := []string{"K", "M", "G", "T", "P", "E"}
	o := 0
	buf := new(bytes.Buffer)
	w := bufio.NewWriter(buf)

	if size < 973 {
		fmt.Fprintf(w, "%3d ", size)
		w.Flush()
		return buf.String()
	}

	for {
		remain := size & 1023
		size >>= 10

		if size >= 973 {
			o++
			continue
		}

		if size < 9 || (size == 9 && remain < 973) {
			remain = ((remain * 5) + 256) / 512
			if remain >= 10 {
				size++
				remain = 0
			}

			fmt.Fprintf(w, "%d.%d%s", size, remain, ord[o])
			break
		}

		if remain >= 512 {
			size++
		}

		fmt.Fprintf(w, "%3d%s", size, ord[o])
		break
	}

	w.Flush()
	return buf.String()
}

func FormatPercent(percent float64) string {
	return strconv.FormatFloat(percent, 'f', -1, 64) + "%"
}

func (self *FileSystemUsage) UsePercent() float64 {
	b_used := (self.Total - self.Free) / 1024
	b_avail := self.Avail / 1024
	utotal := b_used + b_avail
	used := b_used

	if utotal != 0 {
		u100 := used * 100
		pct := u100 / utotal
		if u100%utotal != 0 {
			pct += 1
		}
		return (float64(pct) / float64(100)) * 100.0
	}

	return 0.0
}

func (self *Uptime) Format() string {
	buf := new(bytes.Buffer)
	w := bufio.NewWriter(buf)
	uptime := uint64(self.Length)

	days := uptime / (60 * 60 * 24)

	if days != 0 {
		s := ""
		if days > 1 {
			s = "s"
		}
		fmt.Fprintf(w, "%d day%s, ", days, s)
	}

	minutes := uptime / 60
	hours := minutes / 60
	hours %= 24
	minutes %= 60

	fmt.Fprintf(w, "%2d:%02d", hours, minutes)

	w.Flush()
	return buf.String()
}

func (self *ProcTime) FormatStartTime() string {
	if self.StartTime == 0 {
		return "00:00"
	}
	start := time.Unix(int64(self.StartTime)/1000, 0)
	format := "Jan02"
	if time.Since(start).Seconds() < (60 * 60 * 24) {
		format = "15:04"
	}
	return start.Format(format)
}

func (self *ProcTime) FormatTotal() string {
	t := self.Total / 1000
	ss := t % 60
	t /= 60
	mm := t % 60
	t /= 60
	hh := t % 24
	return fmt.Sprintf("%02d:%02d:%02d", hh, mm, ss)
}

@ -1,141 +0,0 @@
package sigar

import (
	"time"
)

type Sigar interface {
	CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{})
	GetLoadAverage() (LoadAverage, error)
	GetMem() (Mem, error)
	GetSwap() (Swap, error)
	GetFileSystemUsage(string) (FileSystemUsage, error)
}

type Cpu struct {
	User    uint64
	Nice    uint64
	Sys     uint64
	Idle    uint64
	Wait    uint64
	Irq     uint64
	SoftIrq uint64
	Stolen  uint64
}

func (cpu *Cpu) Total() uint64 {
	return cpu.User + cpu.Nice + cpu.Sys + cpu.Idle +
		cpu.Wait + cpu.Irq + cpu.SoftIrq + cpu.Stolen
}

func (cpu Cpu) Delta(other Cpu) Cpu {
	return Cpu{
		User:    cpu.User - other.User,
		Nice:    cpu.Nice - other.Nice,
		Sys:     cpu.Sys - other.Sys,
		Idle:    cpu.Idle - other.Idle,
		Wait:    cpu.Wait - other.Wait,
		Irq:     cpu.Irq - other.Irq,
		SoftIrq: cpu.SoftIrq - other.SoftIrq,
		Stolen:  cpu.Stolen - other.Stolen,
	}
}

type LoadAverage struct {
	One, Five, Fifteen float64
}

type Uptime struct {
	Length float64
}

type Mem struct {
	Total      uint64
	Used       uint64
	Free       uint64
	ActualFree uint64
	ActualUsed uint64
}

type Swap struct {
	Total uint64
	Used  uint64
	Free  uint64
}

type CpuList struct {
	List []Cpu
}

type FileSystem struct {
	DirName     string
	DevName     string
	TypeName    string
	SysTypeName string
	Options     string
	Flags       uint32
}

type FileSystemList struct {
	List []FileSystem
}

type FileSystemUsage struct {
	Total     uint64
	Used      uint64
	Free      uint64
	Avail     uint64
	Files     uint64
	FreeFiles uint64
}

type ProcList struct {
	List []int
}

type RunState byte

const (
	RunStateSleep   = 'S'
	RunStateRun     = 'R'
	RunStateStop    = 'T'
	RunStateZombie  = 'Z'
	RunStateIdle    = 'D'
	RunStateUnknown = '?'
)

type ProcState struct {
	Name      string
	State     RunState
	Ppid      int
	Tty       int
	Priority  int
	Nice      int
	Processor int
}

type ProcMem struct {
	Size        uint64
	Resident    uint64
	Share       uint64
	MinorFaults uint64
	MajorFaults uint64
	PageFaults  uint64
}

type ProcTime struct {
	StartTime uint64
	User      uint64
	Sys       uint64
	Total     uint64
}

type ProcArgs struct {
	List []string
}

type ProcExe struct {
	Name string
	Cwd  string
	Root string
}

135 Godeps/_workspace/src/github.com/cloudfoundry/gosigar/sigar_interface_test.go generated vendored

@ -1,135 +0,0 @@
package sigar_test

import (
	"os"
	"path/filepath"
	"runtime"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	. "github.com/cloudfoundry/gosigar"
)

var _ = Describe("Sigar", func() {
	var invalidPid = 666666

	It("cpu", func() {
		cpu := Cpu{}
		err := cpu.Get()
		Expect(err).ToNot(HaveOccurred())
	})

	It("load average", func() {
		avg := LoadAverage{}
		err := avg.Get()
		Expect(err).ToNot(HaveOccurred())
	})

	It("uptime", func() {
		uptime := Uptime{}
		err := uptime.Get()
		Expect(err).ToNot(HaveOccurred())
		Expect(uptime.Length).To(BeNumerically(">", 0))
	})

	It("mem", func() {
		mem := Mem{}
		err := mem.Get()
		Expect(err).ToNot(HaveOccurred())

		Expect(mem.Total).To(BeNumerically(">", 0))
		Expect(mem.Used + mem.Free).To(BeNumerically("<=", mem.Total))
	})

	It("swap", func() {
		swap := Swap{}
		err := swap.Get()
		Expect(err).ToNot(HaveOccurred())
		Expect(swap.Used + swap.Free).To(BeNumerically("<=", swap.Total))
	})

	It("cpu list", func() {
		cpulist := CpuList{}
		err := cpulist.Get()
		Expect(err).ToNot(HaveOccurred())

		nsigar := len(cpulist.List)
		numcpu := runtime.NumCPU()
		Expect(nsigar).To(Equal(numcpu))
	})

	It("file system list", func() {
		fslist := FileSystemList{}
		err := fslist.Get()
		Expect(err).ToNot(HaveOccurred())

		Expect(len(fslist.List)).To(BeNumerically(">", 0))
	})

	It("file system usage", func() {
		fsusage := FileSystemUsage{}
		err := fsusage.Get("/")
		Expect(err).ToNot(HaveOccurred())

		err = fsusage.Get("T O T A L L Y B O G U S")
		Expect(err).To(HaveOccurred())
	})

	It("proc list", func() {
		pids := ProcList{}
		err := pids.Get()
		Expect(err).ToNot(HaveOccurred())

		Expect(len(pids.List)).To(BeNumerically(">", 2))

		err = pids.Get()
		Expect(err).ToNot(HaveOccurred())
	})

	It("proc state", func() {
		state := ProcState{}
		err := state.Get(os.Getppid())
		Expect(err).ToNot(HaveOccurred())

		Expect([]RunState{RunStateRun, RunStateSleep}).To(ContainElement(state.State))
		Expect([]string{"go", "ginkgo"}).To(ContainElement(state.Name))

		err = state.Get(invalidPid)
		Expect(err).To(HaveOccurred())
	})

	It("proc mem", func() {
		mem := ProcMem{}
		err := mem.Get(os.Getppid())
		Expect(err).ToNot(HaveOccurred())

		err = mem.Get(invalidPid)
		Expect(err).To(HaveOccurred())
	})

	It("proc time", func() {
		time := ProcTime{}
		err := time.Get(os.Getppid())
		Expect(err).ToNot(HaveOccurred())

		err = time.Get(invalidPid)
		Expect(err).To(HaveOccurred())
	})

	It("proc args", func() {
		args := ProcArgs{}
		err := args.Get(os.Getppid())
		Expect(err).ToNot(HaveOccurred())

		Expect(len(args.List)).To(BeNumerically(">=", 2))
	})

	It("proc exe", func() {
		exe := ProcExe{}
		err := exe.Get(os.Getppid())
		Expect(err).ToNot(HaveOccurred())

		Expect([]string{"go", "ginkgo"}).To(ContainElement(filepath.Base(exe.Name)))
	})
})

@ -1,386 +0,0 @@
// Copyright (c) 2012 VMware, Inc.

package sigar

import (
	"bufio"
	"bytes"
	"io"
	"io/ioutil"
	"os"
	"strconv"
	"strings"
	"syscall"
)

var system struct {
	ticks uint64
	btime uint64
}

var Procd string

func init() {
	system.ticks = 100 // C.sysconf(C._SC_CLK_TCK)

	Procd = "/proc"

	// grab system boot time
	readFile(Procd+"/stat", func(line string) bool {
		if strings.HasPrefix(line, "btime") {
			system.btime, _ = strtoull(line[6:])
			return false // stop reading
		}
		return true
	})
}

func (self *LoadAverage) Get() error {
	line, err := ioutil.ReadFile(Procd + "/loadavg")
	if err != nil {
		return nil
	}

	fields := strings.Fields(string(line))

	self.One, _ = strconv.ParseFloat(fields[0], 64)
	self.Five, _ = strconv.ParseFloat(fields[1], 64)
	self.Fifteen, _ = strconv.ParseFloat(fields[2], 64)

	return nil
}

func (self *Uptime) Get() error {
	sysinfo := syscall.Sysinfo_t{}

	if err := syscall.Sysinfo(&sysinfo); err != nil {
		return err
	}

	self.Length = float64(sysinfo.Uptime)

	return nil
}

func (self *Mem) Get() error {
	var buffers, cached uint64
	table := map[string]*uint64{
		"MemTotal": &self.Total,
		"MemFree":  &self.Free,
		"Buffers":  &buffers,
		"Cached":   &cached,
	}

	if err := parseMeminfo(table); err != nil {
		return err
	}

	self.Used = self.Total - self.Free
	kern := buffers + cached
	self.ActualFree = self.Free + kern
	self.ActualUsed = self.Used - kern

	return nil
}

func (self *Swap) Get() error {
	table := map[string]*uint64{
		"SwapTotal": &self.Total,
		"SwapFree":  &self.Free,
	}

	if err := parseMeminfo(table); err != nil {
		return err
	}

	self.Used = self.Total - self.Free
	return nil
}

func (self *Cpu) Get() error {
	return readFile(Procd+"/stat", func(line string) bool {
		if len(line) > 4 && line[0:4] == "cpu " {
			parseCpuStat(self, line)
			return false
		}
		return true

	})
}

func (self *CpuList) Get() error {
	capacity := len(self.List)
	if capacity == 0 {
		capacity = 4
	}
	list := make([]Cpu, 0, capacity)

	err := readFile(Procd+"/stat", func(line string) bool {
		if len(line) > 3 && line[0:3] == "cpu" && line[3] != ' ' {
			cpu := Cpu{}
			parseCpuStat(&cpu, line)
			list = append(list, cpu)
		}
		return true
	})

	self.List = list

	return err
}

func (self *FileSystemList) Get() error {
	capacity := len(self.List)
	if capacity == 0 {
		capacity = 10
	}
	fslist := make([]FileSystem, 0, capacity)

	err := readFile("/etc/mtab", func(line string) bool {
		fields := strings.Fields(line)

		fs := FileSystem{}
		fs.DevName = fields[0]
		fs.DirName = fields[1]
		fs.SysTypeName = fields[2]
		fs.Options = fields[3]

		fslist = append(fslist, fs)

		return true
	})

	self.List = fslist

	return err
}

func (self *ProcList) Get() error {
	dir, err := os.Open(Procd)
	if err != nil {
		return err
	}
	defer dir.Close()

	const readAllDirnames = -1 // see os.File.Readdirnames doc

	names, err := dir.Readdirnames(readAllDirnames)
	if err != nil {
		return err
	}

	capacity := len(names)
	list := make([]int, 0, capacity)

	for _, name := range names {
		if name[0] < '0' || name[0] > '9' {
			continue
		}
		pid, err := strconv.Atoi(name)
		if err == nil {
			list = append(list, pid)
		}
	}

	self.List = list

	return nil
}

func (self *ProcState) Get(pid int) error {
	contents, err := readProcFile(pid, "stat")
	if err != nil {
		return err
	}

	fields := strings.Fields(string(contents))

	self.Name = fields[1][1 : len(fields[1])-1] // strip ()'s

	self.State = RunState(fields[2][0])

	self.Ppid, _ = strconv.Atoi(fields[3])

	self.Tty, _ = strconv.Atoi(fields[6])

	self.Priority, _ = strconv.Atoi(fields[17])

	self.Nice, _ = strconv.Atoi(fields[18])

	self.Processor, _ = strconv.Atoi(fields[38])

	return nil
}

func (self *ProcMem) Get(pid int) error {
	contents, err := readProcFile(pid, "statm")
	if err != nil {
		return err
	}

	fields := strings.Fields(string(contents))

	size, _ := strtoull(fields[0])
	self.Size = size << 12

	rss, _ := strtoull(fields[1])
	self.Resident = rss << 12

	share, _ := strtoull(fields[2])
	self.Share = share << 12

	contents, err = readProcFile(pid, "stat")
	if err != nil {
		return err
	}

	fields = strings.Fields(string(contents))

	self.MinorFaults, _ = strtoull(fields[10])
	self.MajorFaults, _ = strtoull(fields[12])
	self.PageFaults = self.MinorFaults + self.MajorFaults

	return nil
}

func (self *ProcTime) Get(pid int) error {
	contents, err := readProcFile(pid, "stat")
	if err != nil {
		return err
	}

	fields := strings.Fields(string(contents))

	user, _ := strtoull(fields[13])
	sys, _ := strtoull(fields[14])
	// convert to millis
	self.User = user * (1000 / system.ticks)
	self.Sys = sys * (1000 / system.ticks)
	self.Total = self.User + self.Sys

	// convert to millis
	self.StartTime, _ = strtoull(fields[21])
	self.StartTime /= system.ticks
	self.StartTime += system.btime
	self.StartTime *= 1000

	return nil
}

func (self *ProcArgs) Get(pid int) error {
	contents, err := readProcFile(pid, "cmdline")
	if err != nil {
		return err
	}

	bbuf := bytes.NewBuffer(contents)

	var args []string

	for {
		arg, err := bbuf.ReadBytes(0)
		if err == io.EOF {
			break
		}
		args = append(args, string(chop(arg)))
	}

	self.List = args

	return nil
}

func (self *ProcExe) Get(pid int) error {
	fields := map[string]*string{
		"exe":  &self.Name,
		"cwd":  &self.Cwd,
		"root": &self.Root,
	}

	for name, field := range fields {
		val, err := os.Readlink(procFileName(pid, name))

		if err != nil {
			return err
		}

		*field = val
	}

	return nil
}

func parseMeminfo(table map[string]*uint64) error {
	return readFile(Procd+"/meminfo", func(line string) bool {
		fields := strings.Split(line, ":")

		if ptr := table[fields[0]]; ptr != nil {
			num := strings.TrimLeft(fields[1], " ")
			val, err := strtoull(strings.Fields(num)[0])
			if err == nil {
				*ptr = val * 1024
			}
		}

		return true
	})
}

func parseCpuStat(self *Cpu, line string) error {
	fields := strings.Fields(line)

	self.User, _ = strtoull(fields[1])
	self.Nice, _ = strtoull(fields[2])
	self.Sys, _ = strtoull(fields[3])
	self.Idle, _ = strtoull(fields[4])
	self.Wait, _ = strtoull(fields[5])
	self.Irq, _ = strtoull(fields[6])
	self.SoftIrq, _ = strtoull(fields[7])
	self.Stolen, _ = strtoull(fields[8])

	return nil
}

func readFile(file string, handler func(string) bool) error {
	contents, err := ioutil.ReadFile(file)
	if err != nil {
		return err
	}

	reader := bufio.NewReader(bytes.NewBuffer(contents))

	for {
		line, _, err := reader.ReadLine()
		if err == io.EOF {
			break
		}
		if !handler(string(line)) {
			break
		}
	}

	return nil
}

func strtoull(val string) (uint64, error) {
	return strconv.ParseUint(val, 10, 64)
}

func procFileName(pid int, name string) string {
	return Procd + "/" + strconv.Itoa(pid) + "/" + name
}

func readProcFile(pid int, name string) ([]byte, error) {
	path := procFileName(pid, name)
	contents, err := ioutil.ReadFile(path)

	if err != nil {
		if perr, ok := err.(*os.PathError); ok {
			if perr.Err == syscall.ENOENT {
				return nil, syscall.ESRCH
			}
		}
	}

	return contents, err
}

@ -1,225 +0,0 @@
package sigar_test

import (
	"io/ioutil"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	sigar "github.com/cloudfoundry/gosigar"
)

var _ = Describe("sigarLinux", func() {
	var procd string

	BeforeEach(func() {
		var err error
		procd, err = ioutil.TempDir("", "sigarTests")
		Expect(err).ToNot(HaveOccurred())
		sigar.Procd = procd
	})

	AfterEach(func() {
		sigar.Procd = "/proc"
	})

	Describe("CPU", func() {
		var (
			statFile string
			cpu      sigar.Cpu
		)

		BeforeEach(func() {
			statFile = procd + "/stat"
			cpu = sigar.Cpu{}
		})

		Describe("Get", func() {
			It("gets CPU usage", func() {
				statContents := []byte("cpu 25 1 2 3 4 5 6 7")
				err := ioutil.WriteFile(statFile, statContents, 0644)
				Expect(err).ToNot(HaveOccurred())

				err = cpu.Get()
				Expect(err).ToNot(HaveOccurred())
				Expect(cpu.User).To(Equal(uint64(25)))
			})

			It("ignores empty lines", func() {
				statContents := []byte("cpu ")
				err := ioutil.WriteFile(statFile, statContents, 0644)
				Expect(err).ToNot(HaveOccurred())

				err = cpu.Get()
				Expect(err).ToNot(HaveOccurred())
				Expect(cpu.User).To(Equal(uint64(0)))
			})
		})

		Describe("CollectCpuStats", func() {
			It("collects CPU usage over time", func() {
				statContents := []byte("cpu 25 1 2 3 4 5 6 7")
				err := ioutil.WriteFile(statFile, statContents, 0644)
				Expect(err).ToNot(HaveOccurred())

				concreteSigar := &sigar.ConcreteSigar{}
				cpuUsages, stop := concreteSigar.CollectCpuStats(500 * time.Millisecond)

				Expect(<-cpuUsages).To(Equal(sigar.Cpu{
					User:    uint64(25),
					Nice:    uint64(1),
					Sys:     uint64(2),
					Idle:    uint64(3),
					Wait:    uint64(4),
					Irq:     uint64(5),
					SoftIrq: uint64(6),
					Stolen:  uint64(7),
				}))

				statContents = []byte("cpu 30 3 7 10 25 55 36 65")
				err = ioutil.WriteFile(statFile, statContents, 0644)
				Expect(err).ToNot(HaveOccurred())

				Expect(<-cpuUsages).To(Equal(sigar.Cpu{
					User:    uint64(5),
					Nice:    uint64(2),
					Sys:     uint64(5),
					Idle:    uint64(7),
					Wait:    uint64(21),
					Irq:     uint64(50),
					SoftIrq: uint64(30),
					Stolen:  uint64(58),
				}))

				stop <- struct{}{}
			})
		})
	})

	Describe("Mem", func() {
		var meminfoFile string
		BeforeEach(func() {
			meminfoFile = procd + "/meminfo"

			meminfoContents := `
MemTotal: 374256 kB
MemFree: 274460 kB
Buffers: 9764 kB
Cached: 38648 kB
SwapCached: 0 kB
Active: 33772 kB
Inactive: 31184 kB
Active(anon): 16572 kB
Inactive(anon): 552 kB
Active(file): 17200 kB
Inactive(file): 30632 kB
Unevictable: 0 kB
Mlocked: 0 kB
SwapTotal: 786428 kB
SwapFree: 786428 kB
Dirty: 0 kB
Writeback: 0 kB
AnonPages: 16564 kB
Mapped: 6612 kB
Shmem: 584 kB
Slab: 19092 kB
SReclaimable: 9128 kB
SUnreclaim: 9964 kB
KernelStack: 672 kB
PageTables: 1864 kB
NFS_Unstable: 0 kB
Bounce: 0 kB
WritebackTmp: 0 kB
CommitLimit: 973556 kB
Committed_AS: 55880 kB
VmallocTotal: 34359738367 kB
VmallocUsed: 21428 kB
VmallocChunk: 34359713596 kB
HardwareCorrupted: 0 kB
AnonHugePages: 0 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
HugePages_Surp: 0
Hugepagesize: 2048 kB
DirectMap4k: 59328 kB
DirectMap2M: 333824 kB
`
			err := ioutil.WriteFile(meminfoFile, []byte(meminfoContents), 0444)
			Expect(err).ToNot(HaveOccurred())
		})

		It("returns correct memory info", func() {
			mem := sigar.Mem{}
			err := mem.Get()
			Expect(err).ToNot(HaveOccurred())

			Expect(mem.Total).To(BeNumerically("==", 374256*1024))
			Expect(mem.Free).To(BeNumerically("==", 274460*1024))
		})
	})

	Describe("Swap", func() {
		var meminfoFile string
		BeforeEach(func() {
			meminfoFile = procd + "/meminfo"

			meminfoContents := `
MemTotal: 374256 kB
MemFree: 274460 kB
Buffers: 9764 kB
Cached: 38648 kB
SwapCached: 0 kB
Active: 33772 kB
Inactive: 31184 kB
Active(anon): 16572 kB
Inactive(anon): 552 kB
Active(file): 17200 kB
Inactive(file): 30632 kB
Unevictable: 0 kB
Mlocked: 0 kB
SwapTotal: 786428 kB
SwapFree: 786428 kB
Dirty: 0 kB
Writeback: 0 kB
AnonPages: 16564 kB
Mapped: 6612 kB
Shmem: 584 kB
Slab: 19092 kB
SReclaimable: 9128 kB
SUnreclaim: 9964 kB
KernelStack: 672 kB
PageTables: 1864 kB
NFS_Unstable: 0 kB
Bounce: 0 kB
WritebackTmp: 0 kB
CommitLimit: 973556 kB
Committed_AS: 55880 kB
VmallocTotal: 34359738367 kB
VmallocUsed: 21428 kB
VmallocChunk: 34359713596 kB
HardwareCorrupted: 0 kB
AnonHugePages: 0 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
HugePages_Surp: 0
Hugepagesize: 2048 kB
DirectMap4k: 59328 kB
DirectMap2M: 333824 kB
`
			err := ioutil.WriteFile(meminfoFile, []byte(meminfoContents), 0444)
			Expect(err).ToNot(HaveOccurred())
		})

		It("returns correct memory info", func() {
			swap := sigar.Swap{}
			err := swap.Get()
			Expect(err).ToNot(HaveOccurred())

			Expect(swap.Total).To(BeNumerically("==", 786428*1024))
			Expect(swap.Free).To(BeNumerically("==", 786428*1024))
		})
	})
})

@ -1,13 +0,0 @@
package sigar_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"testing"
)

func TestGosigar(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Gosigar Suite")
}

@ -1,26 +0,0 @@
// Copyright (c) 2012 VMware, Inc.

// +build darwin freebsd linux netbsd openbsd

package sigar

import "syscall"

func (self *FileSystemUsage) Get(path string) error {
	stat := syscall.Statfs_t{}
	err := syscall.Statfs(path, &stat)
	if err != nil {
		return err
	}

	bsize := stat.Bsize / 512

	self.Total = (uint64(stat.Blocks) * uint64(bsize)) >> 1
	self.Free = (uint64(stat.Bfree) * uint64(bsize)) >> 1
	self.Avail = (uint64(stat.Bavail) * uint64(bsize)) >> 1
	self.Used = self.Total - self.Free
	self.Files = stat.Files
	self.FreeFiles = stat.Ffree

	return nil
}

@ -1,22 +0,0 @@
// Copyright (c) 2012 VMware, Inc.

package sigar

import (
	"unsafe"
)

func bytePtrToString(ptr *int8) string {
	bytes := (*[10000]byte)(unsafe.Pointer(ptr))

	n := 0
	for bytes[n] != 0 {
		n++
	}

	return string(bytes[0:n])
}

func chop(buf []byte) []byte {
	return buf[0 : len(buf)-1]
}

@ -1,100 +0,0 @@
// Copyright (c) 2012 VMware, Inc.

package sigar

// #include <stdlib.h>
// #include <windows.h>
import "C"

import (
	"fmt"
	"unsafe"
)

func init() {
}

func (self *LoadAverage) Get() error {
	return nil
}

func (self *Uptime) Get() error {
	return nil
}

func (self *Mem) Get() error {
	var statex C.MEMORYSTATUSEX
	statex.dwLength = C.DWORD(unsafe.Sizeof(statex))

	succeeded := C.GlobalMemoryStatusEx(&statex)
	if succeeded == C.FALSE {
		lastError := C.GetLastError()
		return fmt.Errorf("GlobalMemoryStatusEx failed with error: %d", int(lastError))
	}

	self.Total = uint64(statex.ullTotalPhys)
	return nil
}

func (self *Swap) Get() error {
	return notImplemented()
}

func (self *Cpu) Get() error {
	return notImplemented()
}

func (self *CpuList) Get() error {
	return notImplemented()
}

func (self *FileSystemList) Get() error {
	return notImplemented()
}

func (self *ProcList) Get() error {
	return notImplemented()
}

func (self *ProcState) Get(pid int) error {
	return notImplemented()
}

func (self *ProcMem) Get(pid int) error {
	return notImplemented()
}

func (self *ProcTime) Get(pid int) error {
	return notImplemented()
}

func (self *ProcArgs) Get(pid int) error {
	return notImplemented()
}

func (self *ProcExe) Get(pid int) error {
	return notImplemented()
}

func (self *FileSystemUsage) Get(path string) error {
	var availableBytes C.ULARGE_INTEGER
	var totalBytes C.ULARGE_INTEGER
	var totalFreeBytes C.ULARGE_INTEGER

	pathChars := C.CString(path)
	defer C.free(unsafe.Pointer(pathChars))

	succeeded := C.GetDiskFreeSpaceEx((*C.CHAR)(pathChars), &availableBytes, &totalBytes, &totalFreeBytes)
	if succeeded == C.FALSE {
		lastError := C.GetLastError()
		return fmt.Errorf("GetDiskFreeSpaceEx failed with error: %d", int(lastError))
	}

	self.Total = *(*uint64)(unsafe.Pointer(&totalBytes))
	return nil
}

func notImplemented() error {
	panic("Not Implemented")
	return nil
}

@ -1,32 +0,0 @@
package sigar_test

import (
	"os"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	sigar "github.com/cloudfoundry/gosigar"
)

var _ = Describe("SigarWindows", func() {
	Describe("Memory", func() {
		It("gets the total memory", func() {
			mem := sigar.Mem{}
			err := mem.Get()

			Ω(err).ShouldNot(HaveOccurred())
			Ω(mem.Total).Should(BeNumerically(">", 0))
		})
	})

	Describe("Disk", func() {
		It("gets the total disk space", func() {
			usage := sigar.FileSystemUsage{}
			err := usage.Get(os.TempDir())

			Ω(err).ShouldNot(HaveOccurred())
			Ω(usage.Total).Should(BeNumerically(">", 0))
		})
	})
})

@ -1,5 +1,5 @@
language: go
sudo: false
sudo: required
go:
  - 1.3.3
  - 1.4.2

@ -10,3 +10,6 @@ env:
  - GOARCH=386
script:
  - make test
  - DOCKER_HOST=tcp://127.0.0.1:2375 make integration
services:
  - docker

@ -1,6 +1,8 @@
# This is the official list of go-dockerclient authors for copyright purposes.

Abhishek Chanda <abhishek.becs@gmail.com>
Adam Bell-Hanssen <adamb@aller.no>
Adrien Kohlbecker <adrien.kohlbecker@gmail.com>
Aldrin Leal <aldrin@leal.eng.br>
Andreas Jaekle <andreas@jaekle.net>
Andrews Medina <andrewsmedina@gmail.com>

@ -18,6 +20,7 @@ Cesar Wong <cewong@redhat.com>
Cezar Sa Espinola <cezar.sa@corp.globo.com>
Cheah Chu Yeow <chuyeow@gmail.com>
cheneydeng <cheneydeng@qq.com>
Chris Bednarski <banzaimonkey@gmail.com>
CMGS <ilskdw@gmail.com>
Craig Jellick <craig@rancher.com>
Dan Williams <dcbw@redhat.com>

@ -29,6 +32,7 @@ David Huie <dahuie@gmail.com>
Dawn Chen <dawnchen@google.com>
Dinesh Subhraveti <dinesh@gemini-systems.net>
Ed <edrocksit@gmail.com>
Elias G. Schneevoigt <eliasgs@gmail.com>
Erez Horev <erez.horev@elastifile.com>
Eric Anderson <anderson@copperegg.com>
Ewout Prangsma <ewout@prangsma.net>

@ -58,9 +62,11 @@ Liu Peng <vslene@gmail.com>
Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
Lucas Clemente <lucas@clemente.io>
Lucas Weiblen <lucasweiblen@gmail.com>
Lyon Hill <lyondhill@gmail.com>
Mantas Matelis <mmatelis@coursera.org>
Martin Sweeney <martin@sweeney.io>
Máximo Cuadros Ortiz <mcuadros@gmail.com>
Michael Schmatz <michaelschmatz@gmail.com>
Michal Fojtik <mfojtik@redhat.com>
Mike Dillon <mike.dillon@synctree.com>
Mrunal Patel <mrunalp@gmail.com>

@ -79,7 +85,9 @@ Rob Miller <rob@kalistra.com>
Robert Williamson <williamson.robert@gmail.com>
Salvador Gironès <salvadorgirones@gmail.com>
Sam Rijs <srijs@airpost.net>
Sami Wagiaalla <swagiaal@redhat.com>
Samuel Karp <skarp@amazon.com>
Silas Sewell <silas@sewell.org>
Simon Eskildsen <sirup@sirupsen.com>
Simon Menke <simon.menke@gmail.com>
Skolos <skolos@gopherlab.com>

@ -96,4 +104,5 @@ Victor Marmol <vmarmol@google.com>
Vincenzo Prignano <vincenzo.prignano@gmail.com>
Wiliam Souza <wiliamsouza83@gmail.com>
Ye Yin <eyniy@qq.com>
Yu, Zou <zouyu7@huawei.com>
Yuriy Bogdanov <chinsay@gmail.com>

@ -7,6 +7,7 @@
	fmtcheck \
	pretest \
	test \
	integration \
	cov \
	clean

@ -31,13 +32,16 @@ fmt:
	gofmt -w $(SRCS)

fmtcheck:
	$(foreach file,$(SRCS),gofmt $(file) | diff -u $(file) - || exit;)
	$(foreach file,$(SRCS),gofmt -d $(file);)

pretest: lint vet fmtcheck

test: pretest
	$(foreach pkg,$(PKGS),go test $(pkg) || exit;)

integration:
	go test -tags docker_integration -run TestIntegration -v

cov:
	@ go get -v github.com/axw/gocov/gocov
	@ go get golang.org/x/tools/cmd/cover

@ -5,14 +5,14 @@
[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient)

This package presents a client for the Docker remote API. It also provides
support for the extensions in the [Swarm API](https://docs.docker.com/swarm/API/).
support for the extensions in the [Swarm API](https://docs.docker.com/swarm/api/swarm-api/).

This package also provides support for docker's network API, which is a simple
passthrough to the libnetwork remote API. Note that docker's network API is
only available in docker 1.8 and above, and only enabled in docker if
DOCKER_EXPERIMENTAL is defined during the docker build process.

For more details, check the [remote API documentation](http://docs.docker.com/en/latest/reference/api/docker_remote_api/).
For more details, check the [remote API documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/).

## Vendoring

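The README hunk above describes go-dockerclient as a client for the Docker remote API. As a point of reference only (this sketch is not part of the commit), a minimal consumer of the library looks roughly like the following; the socket path and the choice to list all containers are illustrative assumptions:

```go
package main

import (
	"fmt"
	"log"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// Assumes a local Docker daemon listening on the default unix socket.
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}

	// List every container, running or stopped.
	containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID, c.Image)
	}
}
```
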
@ -1,91 +0,0 @@
// Copyright 2015 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package docker

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"strings"
	"testing"
)

func TestAuthLegacyConfig(t *testing.T) {
	auth := base64.StdEncoding.EncodeToString([]byte("user:pass"))
	read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth))
	ac, err := NewAuthConfigurations(read)
	if err != nil {
		t.Error(err)
	}
	c, ok := ac.Configs["docker.io"]
	if !ok {
		t.Error("NewAuthConfigurations: Expected Configs to contain docker.io")
	}
	if got, want := c.Email, "user@example.com"; got != want {
		t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got)
	}
	if got, want := c.Username, "user"; got != want {
		t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got)
	}
	if got, want := c.Password, "pass"; got != want {
		t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got)
	}
	if got, want := c.ServerAddress, "docker.io"; got != want {
		t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. Got %q`, want, got)
	}
}

func TestAuthBadConfig(t *testing.T) {
	auth := base64.StdEncoding.EncodeToString([]byte("userpass"))
	read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth))
	ac, err := NewAuthConfigurations(read)
	if err != ErrCannotParseDockercfg {
		t.Errorf("Incorrect error returned %v\n", err)
	}
	if ac != nil {
		t.Errorf("Invalid auth configuration returned, should be nil %v\n", ac)
	}
}

func TestAuthConfig(t *testing.T) {
	auth := base64.StdEncoding.EncodeToString([]byte("user:pass"))
	read := strings.NewReader(fmt.Sprintf(`{"auths":{"docker.io":{"auth":"%s","email":"user@example.com"}}}`, auth))
	ac, err := NewAuthConfigurations(read)
	if err != nil {
		t.Error(err)
	}
	c, ok := ac.Configs["docker.io"]
	if !ok {
		t.Error("NewAuthConfigurations: Expected Configs to contain docker.io")
	}
	if got, want := c.Email, "user@example.com"; got != want {
		t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got)
	}
	if got, want := c.Username, "user"; got != want {
		t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got)
	}
	if got, want := c.Password, "pass"; got != want {
		t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got)
	}
	if got, want := c.ServerAddress, "docker.io"; got != want {
		t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. Got %q`, want, got)
	}
}

func TestAuthCheck(t *testing.T) {
	fakeRT := &FakeRoundTripper{status: http.StatusOK}
	client := newTestClient(fakeRT)
	if err := client.AuthCheck(nil); err == nil {
		t.Fatalf("expected error on nil auth config")
	}
	// test good auth
	if err := client.AuthCheck(&AuthConfiguration{}); err != nil {
		t.Fatal(err)
	}
	*fakeRT = FakeRoundTripper{status: http.StatusUnauthorized}
	if err := client.AuthCheck(&AuthConfiguration{}); err == nil {
		t.Fatal("expected failure from unauthorized auth")
	}
}

@ -1,154 +0,0 @@
package docker

import (
	"bytes"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"reflect"
	"testing"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func TestBuildImageMultipleContextsError(t *testing.T) {
	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
	client := newTestClient(fakeRT)
	var buf bytes.Buffer
	opts := BuildImageOptions{
		Name:                "testImage",
		NoCache:             true,
		SuppressOutput:      true,
		RmTmpContainer:      true,
		ForceRmTmpContainer: true,
		InputStream:         &buf,
		OutputStream:        &buf,
		ContextDir:          "testing/data",
	}
	err := client.BuildImage(opts)
	if err != ErrMultipleContexts {
		t.Errorf("BuildImage: providing both InputStream and ContextDir should produce an error")
	}
}

func TestBuildImageContextDirDockerignoreParsing(t *testing.T) {
	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
	client := newTestClient(fakeRT)

	if err := os.Symlink("doesnotexist", "testing/data/symlink"); err != nil {
		t.Errorf("error creating symlink on demand: %s", err)
	}
	defer func() {
		if err := os.Remove("testing/data/symlink"); err != nil {
			t.Errorf("error removing symlink on demand: %s", err)
		}
	}()

	var buf bytes.Buffer
	opts := BuildImageOptions{
		Name:                "testImage",
		NoCache:             true,
		SuppressOutput:      true,
		RmTmpContainer:      true,
		ForceRmTmpContainer: true,
		OutputStream:        &buf,
		ContextDir:          "testing/data",
	}
	err := client.BuildImage(opts)
	if err != nil {
		t.Fatal(err)
	}
	reqBody := fakeRT.requests[0].Body
	tmpdir, err := unpackBodyTarball(reqBody)
	if err != nil {
		t.Fatal(err)
	}

	defer func() {
		if err := os.RemoveAll(tmpdir); err != nil {
			t.Fatal(err)
		}
	}()

	files, err := ioutil.ReadDir(tmpdir)
	if err != nil {
		t.Fatal(err)
	}

	foundFiles := []string{}
	for _, file := range files {
		foundFiles = append(foundFiles, file.Name())
	}

	expectedFiles := []string{
		".dockerignore",
		"Dockerfile",
		"barfile",
		"ca.pem",
		"cert.pem",
		"key.pem",
		"server.pem",
		"serverkey.pem",
		"symlink",
	}

	if !reflect.DeepEqual(expectedFiles, foundFiles) {
		t.Errorf(
			"BuildImage: incorrect files sent in tarball to docker server\nexpected %+v, found %+v",
			expectedFiles, foundFiles,
		)
	}
}

func TestBuildImageSendXRegistryConfig(t *testing.T) {
	fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
	client := newTestClient(fakeRT)
	var buf bytes.Buffer
	opts := BuildImageOptions{
		Name:                "testImage",
		NoCache:             true,
		SuppressOutput:      true,
		RmTmpContainer:      true,
		ForceRmTmpContainer: true,
		OutputStream:        &buf,
		ContextDir:          "testing/data",
		AuthConfigs: AuthConfigurations{
			Configs: map[string]AuthConfiguration{
				"quay.io": {
					Username:      "foo",
					Password:      "bar",
					Email:         "baz",
					ServerAddress: "quay.io",
				},
			},
		},
	}

	encodedConfig := "eyJjb25maWdzIjp7InF1YXkuaW8iOnsidXNlcm5hbWUiOiJmb28iLCJwYXNzd29yZCI6ImJhciIsImVtYWlsIjoiYmF6Iiwic2VydmVyYWRkcmVzcyI6InF1YXkuaW8ifX19Cg=="

	if err := client.BuildImage(opts); err != nil {
		t.Fatal(err)
	}

	xRegistryConfig := fakeRT.requests[0].Header["X-Registry-Config"][0]
	if xRegistryConfig != encodedConfig {
		t.Errorf(
			"BuildImage: X-Registry-Config not set currectly: expected %q, got %q",
			encodedConfig,
			xRegistryConfig,
		)
	}
}

func unpackBodyTarball(req io.ReadCloser) (tmpdir string, err error) {
	tmpdir, err = ioutil.TempDir("", "go-dockerclient-test")
	if err != nil {
		return
	}
	err = archive.Untar(req, tmpdir, &archive.TarOptions{
		Compression: archive.Uncompressed,
		NoLchown:    true,
	})
	return
}

@ -1,26 +0,0 @@
// Copyright 2014 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package docker

import (
	"testing"
)

func TestChangeString(t *testing.T) {
	var tests = []struct {
		change   Change
		expected string
	}{
		{Change{"/etc/passwd", ChangeModify}, "C /etc/passwd"},
		{Change{"/etc/passwd", ChangeAdd}, "A /etc/passwd"},
		{Change{"/etc/passwd", ChangeDelete}, "D /etc/passwd"},
		{Change{"/etc/passwd", 33}, " /etc/passwd"},
	}
	for _, tt := range tests {
		if got := tt.change.String(); got != tt.expected {
			t.Errorf("Change.String(): want %q. Got %q.", tt.expected, got)
		}
	}
}

@ -27,12 +27,12 @@ import (
|
|||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"time"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp"
|
||||
)
|
||||
|
||||
const userAgent = "go-dockerclient"
|
||||
|
@ -192,7 +192,7 @@ func NewVersionedClient(endpoint string, apiVersionString string) (*Client, erro
|
|||
}
|
||||
}
|
||||
return &Client{
|
||||
HTTPClient: http.DefaultClient,
|
||||
HTTPClient: cleanhttp.DefaultClient(),
|
||||
Dialer: &net.Dialer{},
|
||||
endpoint: endpoint,
|
||||
endpointURL: u,
|
||||
|
@ -251,17 +251,16 @@ func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) {
|
|||
}
|
||||
dockerHost := dockerEnv.dockerHost
|
||||
if dockerEnv.dockerTLSVerify {
|
||||
parts := strings.SplitN(dockerHost, "://", 2)
|
||||
parts := strings.SplitN(dockerEnv.dockerHost, "://", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost)
|
||||
}
|
||||
dockerHost = fmt.Sprintf("https://%s", parts[1])
|
||||
cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem")
|
||||
key := filepath.Join(dockerEnv.dockerCertPath, "key.pem")
|
||||
ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem")
|
||||
return NewVersionedTLSClient(dockerHost, cert, key, ca, apiVersionString)
|
||||
return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString)
|
||||
}
|
||||
return NewVersionedClient(dockerHost, apiVersionString)
|
||||
return NewVersionedClient(dockerEnv.dockerHost, apiVersionString)
|
||||
}
|
||||
|
||||
// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the givens
|
||||
|
@ -296,9 +295,8 @@ func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock,
|
|||
}
|
||||
tlsConfig.RootCAs = caPool
|
||||
}
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
}
|
||||
tr := cleanhttp.DefaultTransport()
|
||||
tr.TLSClientConfig = tlsConfig
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -375,6 +373,7 @@ func (c *Client) getServerAPIVersionString() (version string, err error) {
|
|||
type doOptions struct {
|
||||
data interface{}
|
||||
forceJSON bool
|
||||
headers map[string]string
|
||||
}
|
||||
|
||||
func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) {
|
||||
|
@ -392,7 +391,6 @@ func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, e
|
|||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
httpClient := c.HTTPClient
|
||||
protocol := c.endpointURL.Scheme
|
||||
var u string
|
||||
|
@ -402,7 +400,6 @@ func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, e
|
|||
} else {
|
||||
u = c.getURL(path)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(method, u, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -414,6 +411,9 @@ func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, e
|
|||
req.Header.Set("Content-Type", "plain/text")
|
||||
}
|
||||
|
||||
for k, v := range doOptions.headers {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
|
@ -421,7 +421,6 @@ func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, e
|
|||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
|
||||
return nil, newError(resp)
|
||||
}
|
||||
|
@ -466,13 +465,9 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error
|
|||
address := c.endpointURL.Path
|
||||
if streamOptions.stdout == nil {
|
||||
streamOptions.stdout = ioutil.Discard
|
||||
} else if t, ok := streamOptions.stdout.(io.Closer); ok {
|
||||
defer t.Close()
|
||||
}
|
||||
if streamOptions.stderr == nil {
|
||||
streamOptions.stderr = ioutil.Discard
|
||||
} else if t, ok := streamOptions.stderr.(io.Closer); ok {
|
||||
defer t.Close()
|
||||
}
|
||||
if protocol == "unix" {
|
||||
dial, err := c.Dialer.Dial(protocol, address)
|
||||
|
@ -566,7 +561,6 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error
|
|||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var params io.Reader
|
||||
if hijackOptions.data != nil {
|
||||
buf, err := json.Marshal(hijackOptions.data)
|
||||
|
@ -575,13 +569,6 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error
|
|||
}
|
||||
params = bytes.NewBuffer(buf)
|
||||
}
|
||||
|
||||
if hijackOptions.stdout == nil {
|
||||
hijackOptions.stdout = ioutil.Discard
|
||||
}
|
||||
if hijackOptions.stderr == nil {
|
||||
hijackOptions.stderr = ioutil.Discard
|
||||
}
|
||||
req, err := http.NewRequest(method, c.getURL(path), params)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -618,22 +605,39 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error
|
|||
defer rwc.Close()
|
||||
errChanOut := make(chan error, 1)
|
||||
errChanIn := make(chan error, 1)
|
||||
go func() {
|
||||
defer func() {
|
||||
if hijackOptions.in != nil {
|
||||
if closer, ok := hijackOptions.in.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
}
|
||||
}()
|
||||
var err error
|
||||
if hijackOptions.setRawTerminal {
|
||||
_, err = io.Copy(hijackOptions.stdout, br)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
|
||||
if hijackOptions.stdout == nil && hijackOptions.stderr == nil {
|
||||
close(errChanOut)
|
||||
} else {
|
||||
// Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set.
|
||||
// Otherwise, if the only stream you care about is stdin, your attach session
|
||||
// will "hang" until the container terminates, even though you're not reading
|
||||
// stdout/stderr
|
||||
if hijackOptions.stdout == nil {
|
||||
hijackOptions.stdout = ioutil.Discard
|
||||
}
|
||||
errChanOut <- err
|
||||
}()
|
||||
if hijackOptions.stderr == nil {
|
||||
hijackOptions.stderr = ioutil.Discard
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
if hijackOptions.in != nil {
|
||||
if closer, ok := hijackOptions.in.(io.Closer); ok {
|
||||
closer.Close()
|
||||
}
|
||||
errChanIn <- nil
|
||||
}
|
||||
}()
|
||||
|
||||
var err error
|
||||
if hijackOptions.setRawTerminal {
|
||||
_, err = io.Copy(hijackOptions.stdout, br)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
|
||||
}
|
||||
errChanOut <- err
|
||||
}()
|
||||
}
|
||||
go func() {
|
||||
var err error
|
||||
if hijackOptions.in != nil {
|
||||
|
@@ -657,7 +661,6 @@ func (c *Client) getURL(path string) string {
if c.endpointURL.Scheme == "unix" {
urlStr = ""
}

if c.requestedAPIVersion != nil {
return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
}
@@ -673,9 +676,7 @@ func (c *Client) getFakeUnixURL(path string) string {
u.Scheme = "http"
u.Host = "unix.sock" // Doesn't matter what this is - it's not used.
u.Path = ""

urlStr := strings.TrimRight(u.String(), "/")

if c.requestedAPIVersion != nil {
return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
}
@@ -686,7 +687,6 @@ func (c *Client) unixClient() *http.Client {
if c.unixHTTPClient != nil {
return c.unixHTTPClient
}

socketPath := c.endpointURL.Path
c.unixHTTPClient = &http.Client{
Transport: &http.Transport{
@@ -695,7 +695,6 @@ func (c *Client) unixClient() *http.Client {
},
},
}

return c.unixHTTPClient
}

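`getFakeUnixURL` and `unixClient` work by handing `net/http` a throwaway `http://unix.sock` URL while the transport dials the Unix socket itself. A standard-library sketch of that idiom, with an illustrative socket path and the Docker `/_ping` endpoint; this is not the library code, just the same trick in isolation:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
)

// unixHTTPClient returns an *http.Client whose transport dials a Unix socket,
// ignoring the host part of the URL (the fake "http://unix.sock" host above).
func unixHTTPClient(socketPath string) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			Dial: func(network, addr string) (net.Conn, error) {
				return net.Dial("unix", socketPath)
			},
		},
	}
}

func main() {
	c := unixHTTPClient("/var/run/docker.sock")
	resp, err := c.Get("http://unix.sock/_ping") // host is a placeholder
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```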
@@ -817,7 +816,7 @@ func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
number, err := strconv.ParseInt(port, 10, 64)
if err == nil && number > 0 && number < 65536 {
if u.Scheme == "tcp" {
if number == 2376 {
if tls {
u.Scheme = "https"
} else {
u.Scheme = "http"
@@ -841,7 +840,7 @@ func getDockerEnv() (*dockerEnv, error) {
dockerHost := os.Getenv("DOCKER_HOST")
var err error
if dockerHost == "" {
dockerHost, err = getDefaultDockerHost()
dockerHost, err = DefaultDockerHost()
if err != nil {
return nil, err
}
@@ -869,14 +868,15 @@ func getDockerEnv() (*dockerEnv, error) {
}, nil
}

func getDefaultDockerHost() (string, error) {
// DefaultDockerHost returns the default docker socket for the current OS
func DefaultDockerHost() (string, error) {
var defaultHost string
if runtime.GOOS != "windows" {
// If we do not have a host, default to unix socket
defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
} else {
if runtime.GOOS == "windows" {
// If we do not have a host, default to TCP socket on Windows
defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
} else {
// If we do not have a host, default to unix socket
defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
}
return opts.ValidateHost(defaultHost)
}
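Exporting `DefaultDockerHost` (previously the unexported `getDefaultDockerHost`) lets callers resolve a sensible endpoint when `DOCKER_HOST` is unset. A hedged sketch of how a consumer might use it; the fallback flow below is an assumption for illustration, not code from this commit:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	endpoint := os.Getenv("DOCKER_HOST")
	if endpoint == "" {
		// DefaultDockerHost is the helper exported in the hunk above:
		// a Unix socket on most platforms, a TCP address on Windows.
		var err error
		endpoint, err = docker.DefaultDockerHost()
		if err != nil {
			log.Fatal(err)
		}
	}
	client, err := docker.NewClient(endpoint)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("using endpoint:", endpoint, "client ready:", client != nil)
}
```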
@ -1,460 +0,0 @@
|
|||
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestNewAPIClient(t *testing.T) {
|
||||
endpoint := "http://localhost:4243"
|
||||
client, err := NewClient(endpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if client.endpoint != endpoint {
|
||||
t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
|
||||
}
|
||||
if client.HTTPClient != http.DefaultClient {
|
||||
t.Errorf("Expected http.Client %#v. Got %#v.", http.DefaultClient, client.HTTPClient)
|
||||
}
|
||||
// test unix socket endpoints
|
||||
endpoint = "unix:///var/run/docker.sock"
|
||||
client, err = NewClient(endpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if client.endpoint != endpoint {
|
||||
t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
|
||||
}
|
||||
if !client.SkipServerVersionCheck {
|
||||
t.Error("Expected SkipServerVersionCheck to be true, got false")
|
||||
}
|
||||
if client.requestedAPIVersion != nil {
|
||||
t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion)
|
||||
}
|
||||
}
|
||||
|
||||
func newTLSClient(endpoint string) (*Client, error) {
|
||||
return NewTLSClient(endpoint,
|
||||
"testing/data/cert.pem",
|
||||
"testing/data/key.pem",
|
||||
"testing/data/ca.pem")
|
||||
}
|
||||
|
||||
func TestNewTSLAPIClient(t *testing.T) {
|
||||
endpoint := "https://localhost:4243"
|
||||
client, err := newTLSClient(endpoint)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if client.endpoint != endpoint {
|
||||
t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
|
||||
}
|
||||
if !client.SkipServerVersionCheck {
|
||||
t.Error("Expected SkipServerVersionCheck to be true, got false")
|
||||
}
|
||||
if client.requestedAPIVersion != nil {
|
||||
t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewVersionedClient(t *testing.T) {
|
||||
endpoint := "http://localhost:4243"
|
||||
client, err := NewVersionedClient(endpoint, "1.12")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if client.endpoint != endpoint {
|
||||
t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
|
||||
}
|
||||
if client.HTTPClient != http.DefaultClient {
|
||||
t.Errorf("Expected http.Client %#v. Got %#v.", http.DefaultClient, client.HTTPClient)
|
||||
}
|
||||
if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" {
|
||||
t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.12", reqVersion)
|
||||
}
|
||||
if client.SkipServerVersionCheck {
|
||||
t.Error("Expected SkipServerVersionCheck to be false, got true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewTLSVersionedClient(t *testing.T) {
|
||||
certPath := "testing/data/cert.pem"
|
||||
keyPath := "testing/data/key.pem"
|
||||
caPath := "testing/data/ca.pem"
|
||||
endpoint := "https://localhost:4243"
|
||||
client, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if client.endpoint != endpoint {
|
||||
t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint)
|
||||
}
|
||||
if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.14" {
|
||||
t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.14", reqVersion)
|
||||
}
|
||||
if client.SkipServerVersionCheck {
|
||||
t.Error("Expected SkipServerVersionCheck to be false, got true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewTLSVersionedClientInvalidCA(t *testing.T) {
|
||||
certPath := "testing/data/cert.pem"
|
||||
keyPath := "testing/data/key.pem"
|
||||
caPath := "testing/data/key.pem"
|
||||
endpoint := "https://localhost:4243"
|
||||
_, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14")
|
||||
if err == nil {
|
||||
t.Errorf("Expected invalid ca at %s", caPath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewClientInvalidEndpoint(t *testing.T) {
|
||||
cases := []string{
|
||||
"htp://localhost:3243", "http://localhost:a", "localhost:8080",
|
||||
"", "localhost", "http://localhost:8080:8383", "http://localhost:65536",
|
||||
"https://localhost:-20",
|
||||
}
|
||||
for _, c := range cases {
|
||||
client, err := NewClient(c)
|
||||
if client != nil {
|
||||
t.Errorf("Want <nil> client for invalid endpoint, got %#v.", client)
|
||||
}
|
||||
if !reflect.DeepEqual(err, ErrInvalidEndpoint) {
|
||||
t.Errorf("NewClient(%q): Got invalid error for invalid endpoint. Want %#v. Got %#v.", c, ErrInvalidEndpoint, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewTLSClient(t *testing.T) {
|
||||
var tests = []struct {
|
||||
endpoint string
|
||||
expected string
|
||||
}{
|
||||
{"tcp://localhost:2376", "https"},
|
||||
{"tcp://localhost:2375", "https"},
|
||||
{"tcp://localhost:4000", "https"},
|
||||
{"http://localhost:4000", "https"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
client, err := newTLSClient(tt.endpoint)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
got := client.endpointURL.Scheme
|
||||
if got != tt.expected {
|
||||
t.Errorf("endpointURL.Scheme: Got %s. Want %s.", got, tt.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEndpoint(t *testing.T) {
|
||||
client, err := NewVersionedClient("http://localhost:4243", "1.12")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if endpoint := client.Endpoint(); endpoint != client.endpoint {
|
||||
t.Errorf("Client.Endpoint(): want %q. Got %q", client.endpoint, endpoint)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetURL(t *testing.T) {
|
||||
var tests = []struct {
|
||||
endpoint string
|
||||
path string
|
||||
expected string
|
||||
}{
|
||||
{"http://localhost:4243/", "/", "http://localhost:4243/"},
|
||||
{"http://localhost:4243", "/", "http://localhost:4243/"},
|
||||
{"http://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"},
|
||||
{"tcp://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"},
|
||||
{"http://localhost:4243/////", "/", "http://localhost:4243/"},
|
||||
{"unix:///var/run/docker.socket", "/containers", "/containers"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
client, _ := NewClient(tt.endpoint)
|
||||
client.endpoint = tt.endpoint
|
||||
client.SkipServerVersionCheck = true
|
||||
got := client.getURL(tt.path)
|
||||
if got != tt.expected {
|
||||
t.Errorf("getURL(%q): Got %s. Want %s.", tt.path, got, tt.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetFakeUnixURL(t *testing.T) {
|
||||
var tests = []struct {
|
||||
endpoint string
|
||||
path string
|
||||
expected string
|
||||
}{
|
||||
{"unix://var/run/docker.sock", "/", "http://unix.sock/"},
|
||||
{"unix://var/run/docker.socket", "/", "http://unix.sock/"},
|
||||
{"unix://var/run/docker.sock", "/containers/ps", "http://unix.sock/containers/ps"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
client, _ := NewClient(tt.endpoint)
|
||||
client.endpoint = tt.endpoint
|
||||
client.SkipServerVersionCheck = true
|
||||
got := client.getFakeUnixURL(tt.path)
|
||||
if got != tt.expected {
|
||||
t.Errorf("getURL(%q): Got %s. Want %s.", tt.path, got, tt.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestError(t *testing.T) {
|
||||
fakeBody := ioutil.NopCloser(bytes.NewBufferString("bad parameter"))
|
||||
resp := &http.Response{
|
||||
StatusCode: 400,
|
||||
Body: fakeBody,
|
||||
}
|
||||
err := newError(resp)
|
||||
expected := Error{Status: 400, Message: "bad parameter"}
|
||||
if !reflect.DeepEqual(expected, *err) {
|
||||
t.Errorf("Wrong error type. Want %#v. Got %#v.", expected, *err)
|
||||
}
|
||||
message := "API error (400): bad parameter"
|
||||
if err.Error() != message {
|
||||
t.Errorf("Wrong error message. Want %q. Got %q.", message, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestQueryString(t *testing.T) {
|
||||
v := float32(2.4)
|
||||
f32QueryString := fmt.Sprintf("w=%s&x=10&y=10.35", strconv.FormatFloat(float64(v), 'f', -1, 64))
|
||||
jsonPerson := url.QueryEscape(`{"Name":"gopher","age":4}`)
|
||||
var tests = []struct {
|
||||
input interface{}
|
||||
want string
|
||||
}{
|
||||
{&ListContainersOptions{All: true}, "all=1"},
|
||||
{ListContainersOptions{All: true}, "all=1"},
|
||||
{ListContainersOptions{Before: "something"}, "before=something"},
|
||||
{ListContainersOptions{Before: "something", Since: "other"}, "before=something&since=other"},
|
||||
{ListContainersOptions{Filters: map[string][]string{"status": {"paused", "running"}}}, "filters=%7B%22status%22%3A%5B%22paused%22%2C%22running%22%5D%7D"},
|
||||
{dumb{X: 10, Y: 10.35000}, "x=10&y=10.35"},
|
||||
{dumb{W: v, X: 10, Y: 10.35000}, f32QueryString},
|
||||
{dumb{X: 10, Y: 10.35000, Z: 10}, "x=10&y=10.35&zee=10"},
|
||||
{dumb{v: 4, X: 10, Y: 10.35000}, "x=10&y=10.35"},
|
||||
{dumb{T: 10, Y: 10.35000}, "y=10.35"},
|
||||
{dumb{Person: &person{Name: "gopher", Age: 4}}, "p=" + jsonPerson},
|
||||
{nil, ""},
|
||||
{10, ""},
|
||||
{"not_a_struct", ""},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
got := queryString(tt.input)
|
||||
if got != tt.want {
|
||||
t.Errorf("queryString(%v). Want %q. Got %q.", tt.input, tt.want, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewAPIVersionFailures(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input string
|
||||
expectedError string
|
||||
}{
|
||||
{"1-0", `Unable to parse version "1-0"`},
|
||||
{"1.0-beta", `Unable to parse version "1.0-beta": "0-beta" is not an integer`},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
v, err := NewAPIVersion(tt.input)
|
||||
if v != nil {
|
||||
t.Errorf("Expected <nil> version, got %v.", v)
|
||||
}
|
||||
if err.Error() != tt.expectedError {
|
||||
t.Errorf("NewAPIVersion(%q): wrong error. Want %q. Got %q", tt.input, tt.expectedError, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAPIVersions(t *testing.T) {
|
||||
var tests = []struct {
|
||||
a string
|
||||
b string
|
||||
expectedALessThanB bool
|
||||
expectedALessThanOrEqualToB bool
|
||||
expectedAGreaterThanB bool
|
||||
expectedAGreaterThanOrEqualToB bool
|
||||
}{
|
||||
{"1.11", "1.11", false, true, false, true},
|
||||
{"1.10", "1.11", true, true, false, false},
|
||||
{"1.11", "1.10", false, false, true, true},
|
||||
|
||||
{"1.9", "1.11", true, true, false, false},
|
||||
{"1.11", "1.9", false, false, true, true},
|
||||
|
||||
{"1.1.1", "1.1", false, false, true, true},
|
||||
{"1.1", "1.1.1", true, true, false, false},
|
||||
|
||||
{"2.1", "1.1.1", false, false, true, true},
|
||||
{"2.1", "1.3.1", false, false, true, true},
|
||||
{"1.1.1", "2.1", true, true, false, false},
|
||||
{"1.3.1", "2.1", true, true, false, false},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
a, _ := NewAPIVersion(tt.a)
|
||||
b, _ := NewAPIVersion(tt.b)
|
||||
|
||||
if tt.expectedALessThanB && !a.LessThan(b) {
|
||||
t.Errorf("Expected %#v < %#v", a, b)
|
||||
}
|
||||
if tt.expectedALessThanOrEqualToB && !a.LessThanOrEqualTo(b) {
|
||||
t.Errorf("Expected %#v <= %#v", a, b)
|
||||
}
|
||||
if tt.expectedAGreaterThanB && !a.GreaterThan(b) {
|
||||
t.Errorf("Expected %#v > %#v", a, b)
|
||||
}
|
||||
if tt.expectedAGreaterThanOrEqualToB && !a.GreaterThanOrEqualTo(b) {
|
||||
t.Errorf("Expected %#v >= %#v", a, b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPing(t *testing.T) {
|
||||
fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
err := client.Ping()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPingFailing(t *testing.T) {
|
||||
fakeRT := &FakeRoundTripper{message: "", status: http.StatusInternalServerError}
|
||||
client := newTestClient(fakeRT)
|
||||
err := client.Ping()
|
||||
if err == nil {
|
||||
t.Fatal("Expected non nil error, got nil")
|
||||
}
|
||||
expectedErrMsg := "API error (500): "
|
||||
if err.Error() != expectedErrMsg {
|
||||
t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestPingFailingWrongStatus(t *testing.T) {
|
||||
fakeRT := &FakeRoundTripper{message: "", status: http.StatusAccepted}
|
||||
client := newTestClient(fakeRT)
|
||||
err := client.Ping()
|
||||
if err == nil {
|
||||
t.Fatal("Expected non nil error, got nil")
|
||||
}
|
||||
expectedErrMsg := "API error (202): "
|
||||
if err.Error() != expectedErrMsg {
|
||||
t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func TestPingErrorWithUnixSocket(t *testing.T) {
|
||||
go func() {
|
||||
li, err := net.Listen("unix", "/tmp/echo.sock")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer li.Close()
|
||||
if err != nil {
|
||||
t.Fatalf("Expected to get listener, but failed: %#v", err)
|
||||
}
|
||||
|
||||
fd, err := li.Accept()
|
||||
if err != nil {
|
||||
t.Fatalf("Expected to accept connection, but failed: %#v", err)
|
||||
}
|
||||
|
||||
buf := make([]byte, 512)
|
||||
nr, err := fd.Read(buf)
|
||||
|
||||
// Create invalid response message to trigger error.
|
||||
data := buf[0:nr]
|
||||
for i := 0; i < 10; i++ {
|
||||
data[i] = 63
|
||||
}
|
||||
|
||||
_, err = fd.Write(data)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected to write to socket, but failed: %#v", err)
|
||||
}
|
||||
|
||||
return
|
||||
}()
|
||||
|
||||
// Wait for unix socket to listen
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
endpoint := "unix:///tmp/echo.sock"
|
||||
u, _ := parseEndpoint(endpoint, false)
|
||||
client := Client{
|
||||
HTTPClient: http.DefaultClient,
|
||||
Dialer: &net.Dialer{},
|
||||
endpoint: endpoint,
|
||||
endpointURL: u,
|
||||
SkipServerVersionCheck: true,
|
||||
}
|
||||
|
||||
err := client.Ping()
|
||||
if err == nil {
|
||||
t.Fatal("Expected non nil error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
type FakeRoundTripper struct {
|
||||
message string
|
||||
status int
|
||||
header map[string]string
|
||||
requests []*http.Request
|
||||
}
|
||||
|
||||
func (rt *FakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
body := strings.NewReader(rt.message)
|
||||
rt.requests = append(rt.requests, r)
|
||||
res := &http.Response{
|
||||
StatusCode: rt.status,
|
||||
Body: ioutil.NopCloser(body),
|
||||
Header: make(http.Header),
|
||||
}
|
||||
for k, v := range rt.header {
|
||||
res.Header.Set(k, v)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (rt *FakeRoundTripper) Reset() {
|
||||
rt.requests = nil
|
||||
}
|
||||
|
||||
type person struct {
|
||||
Name string
|
||||
Age int `json:"age"`
|
||||
}
|
||||
|
||||
type dumb struct {
|
||||
T int `qs:"-"`
|
||||
v int
|
||||
W float32
|
||||
X int
|
||||
Y float64
|
||||
Z int `qs:"zee"`
|
||||
Person *person `qs:"p"`
|
||||
}
|
||||
|
||||
type fakeEndpointURL struct {
|
||||
Scheme string
|
||||
}
|
|
@ -457,6 +457,7 @@ type HostConfig struct {
|
|||
Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty"`
|
||||
CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty"`
|
||||
CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty"`
|
||||
GroupAdd []string `json:"GroupAdd,omitempty" yaml:"GroupAdd,omitempty"`
|
||||
ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"`
|
||||
LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"`
|
||||
Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"`
|
||||
|
|
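The `HostConfig` hunk above adds the `GroupAdd` field (supplementary groups for the container process). A hedged sketch of passing it through the client; the image name, container name, and group are placeholders, and the host config is supplied at start time, which is one of the ways this version of the client accepts it:

```go
package main

import (
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	container, err := client.CreateContainer(docker.CreateContainerOptions{
		Name:   "telegraf-test", // placeholder name
		Config: &docker.Config{Image: "ubuntu:14.04", Cmd: []string{"true"}},
	})
	if err != nil {
		log.Fatal(err)
	}
	// GroupAdd is the field introduced in the hunk above.
	err = client.StartContainer(container.ID, &docker.HostConfig{
		GroupAdd: []string{"docker"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("started container", container.ID)
}
```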
File diff suppressed because it is too large
@ -1,351 +0,0 @@
|
|||
// Copyright 2014 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the DOCKER-LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input []string
|
||||
query string
|
||||
expected string
|
||||
}{
|
||||
{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PATH", "/usr/bin:/bin"},
|
||||
{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", "/usr/local"},
|
||||
{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", ""},
|
||||
{[]string{"WAT="}, "WAT", ""},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
env := Env(tt.input)
|
||||
got := env.Get(tt.query)
|
||||
if got != tt.expected {
|
||||
t.Errorf("Env.Get(%q): wrong result. Want %q. Got %q", tt.query, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExists(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input []string
|
||||
query string
|
||||
expected bool
|
||||
}{
|
||||
{[]string{"WAT=", "PYTHONPATH=/usr/local"}, "WAT", true},
|
||||
{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", true},
|
||||
{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
env := Env(tt.input)
|
||||
got := env.Exists(tt.query)
|
||||
if got != tt.expected {
|
||||
t.Errorf("Env.Exists(%q): wrong result. Want %v. Got %v", tt.query, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetBool(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input string
|
||||
expected bool
|
||||
}{
|
||||
{"EMTPY_VAR", false}, {"ZERO_VAR", false}, {"NO_VAR", false},
|
||||
{"FALSE_VAR", false}, {"NONE_VAR", false}, {"TRUE_VAR", true},
|
||||
{"WAT", true}, {"PATH", true}, {"ONE_VAR", true}, {"NO_VAR_TAB", false},
|
||||
}
|
||||
env := Env([]string{
|
||||
"EMPTY_VAR=", "ZERO_VAR=0", "NO_VAR=no", "FALSE_VAR=false",
|
||||
"NONE_VAR=none", "TRUE_VAR=true", "WAT=wat", "PATH=/usr/bin:/bin",
|
||||
"ONE_VAR=1", "NO_VAR_TAB=0 \t\t\t",
|
||||
})
|
||||
for _, tt := range tests {
|
||||
got := env.GetBool(tt.input)
|
||||
if got != tt.expected {
|
||||
t.Errorf("Env.GetBool(%q): wrong result. Want %v. Got %v.", tt.input, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetBool(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input bool
|
||||
expected string
|
||||
}{
|
||||
{true, "1"}, {false, "0"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
var env Env
|
||||
env.SetBool("SOME", tt.input)
|
||||
if got := env.Get("SOME"); got != tt.expected {
|
||||
t.Errorf("Env.SetBool(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetInt(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input string
|
||||
expected int
|
||||
}{
|
||||
{"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2},
|
||||
}
|
||||
env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"})
|
||||
for _, tt := range tests {
|
||||
got := env.GetInt(tt.input)
|
||||
if got != tt.expected {
|
||||
t.Errorf("Env.GetInt(%q): wrong result. Want %d. Got %d", tt.input, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetInt(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input int
|
||||
expected string
|
||||
}{
|
||||
{10, "10"}, {13, "13"}, {7, "7"}, {33, "33"},
|
||||
{0, "0"}, {-34, "-34"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
var env Env
|
||||
env.SetInt("SOME", tt.input)
|
||||
if got := env.Get("SOME"); got != tt.expected {
|
||||
t.Errorf("Env.SetBool(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetInt64(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input string
|
||||
expected int64
|
||||
}{
|
||||
{"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2},
|
||||
}
|
||||
env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"})
|
||||
for _, tt := range tests {
|
||||
got := env.GetInt64(tt.input)
|
||||
if got != tt.expected {
|
||||
t.Errorf("Env.GetInt64(%q): wrong result. Want %d. Got %d", tt.input, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetInt64(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input int64
|
||||
expected string
|
||||
}{
|
||||
{10, "10"}, {13, "13"}, {7, "7"}, {33, "33"},
|
||||
{0, "0"}, {-34, "-34"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
var env Env
|
||||
env.SetInt64("SOME", tt.input)
|
||||
if got := env.Get("SOME"); got != tt.expected {
|
||||
t.Errorf("Env.SetBool(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetJSON(t *testing.T) {
|
||||
var p struct {
|
||||
Name string `json:"name"`
|
||||
Age int `json:"age"`
|
||||
}
|
||||
var env Env
|
||||
env.Set("person", `{"name":"Gopher","age":5}`)
|
||||
err := env.GetJSON("person", &p)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if p.Name != "Gopher" {
|
||||
t.Errorf("Env.GetJSON(%q): wrong name. Want %q. Got %q", "person", "Gopher", p.Name)
|
||||
}
|
||||
if p.Age != 5 {
|
||||
t.Errorf("Env.GetJSON(%q): wrong age. Want %d. Got %d", "person", 5, p.Age)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetJSONAbsent(t *testing.T) {
|
||||
var l []string
|
||||
var env Env
|
||||
err := env.GetJSON("person", &l)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if l != nil {
|
||||
t.Errorf("Env.GetJSON(): get unexpected list %v", l)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetJSONFailure(t *testing.T) {
|
||||
var p []string
|
||||
var env Env
|
||||
env.Set("list-person", `{"name":"Gopher","age":5}`)
|
||||
err := env.GetJSON("list-person", &p)
|
||||
if err == nil {
|
||||
t.Errorf("Env.GetJSON(%q): got unexpected <nil> error.", "list-person")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetJSON(t *testing.T) {
|
||||
var p1 = struct {
|
||||
Name string `json:"name"`
|
||||
Age int `json:"age"`
|
||||
}{Name: "Gopher", Age: 5}
|
||||
var env Env
|
||||
err := env.SetJSON("person", p1)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
var p2 struct {
|
||||
Name string `json:"name"`
|
||||
Age int `json:"age"`
|
||||
}
|
||||
err = env.GetJSON("person", &p2)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if !reflect.DeepEqual(p1, p2) {
|
||||
t.Errorf("Env.SetJSON(%q): wrong result. Want %v. Got %v", "person", p1, p2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetJSONFailure(t *testing.T) {
|
||||
var env Env
|
||||
err := env.SetJSON("person", unmarshable{})
|
||||
if err == nil {
|
||||
t.Error("Env.SetJSON(): got unexpected <nil> error")
|
||||
}
|
||||
if env.Exists("person") {
|
||||
t.Errorf("Env.SetJSON(): should not define the key %q, but did", "person")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetList(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input string
|
||||
expected []string
|
||||
}{
|
||||
{"WAT=wat", []string{"wat"}},
|
||||
{`WAT=["wat","wet","wit","wot","wut"]`, []string{"wat", "wet", "wit", "wot", "wut"}},
|
||||
{"WAT=", nil},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
env := Env([]string{tt.input})
|
||||
got := env.GetList("WAT")
|
||||
if !reflect.DeepEqual(got, tt.expected) {
|
||||
t.Errorf("Env.GetList(%q): wrong result. Want %v. Got %v", "WAT", tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetList(t *testing.T) {
|
||||
list := []string{"a", "b", "c"}
|
||||
var env Env
|
||||
if err := env.SetList("SOME", list); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if got := env.GetList("SOME"); !reflect.DeepEqual(got, list) {
|
||||
t.Errorf("Env.SetList(%v): wrong result. Got %v", list, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSet(t *testing.T) {
|
||||
var env Env
|
||||
env.Set("PATH", "/home/bin:/bin")
|
||||
env.Set("SOMETHING", "/usr/bin")
|
||||
env.Set("PATH", "/bin")
|
||||
if expected, got := "/usr/bin", env.Get("SOMETHING"); got != expected {
|
||||
t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got)
|
||||
}
|
||||
if expected, got := "/bin", env.Get("PATH"); got != expected {
|
||||
t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecode(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input string
|
||||
expectedOut []string
|
||||
expectedErr string
|
||||
}{
|
||||
{
|
||||
`{"PATH":"/usr/bin:/bin","containers":54,"wat":["123","345"]}`,
|
||||
[]string{"PATH=/usr/bin:/bin", "containers=54", `wat=["123","345"]`},
|
||||
"",
|
||||
},
|
||||
{"}}", nil, "invalid character '}' looking for beginning of value"},
|
||||
{`{}`, nil, ""},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
var env Env
|
||||
err := env.Decode(bytes.NewBufferString(tt.input))
|
||||
if tt.expectedErr == "" {
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
} else if tt.expectedErr != err.Error() {
|
||||
t.Errorf("Env.Decode(): invalid error. Want %q. Got %q.", tt.expectedErr, err)
|
||||
}
|
||||
got := []string(env)
|
||||
sort.Strings(got)
|
||||
sort.Strings(tt.expectedOut)
|
||||
if !reflect.DeepEqual(got, tt.expectedOut) {
|
||||
t.Errorf("Env.Decode(): wrong result. Want %v. Got %v.", tt.expectedOut, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetAuto(t *testing.T) {
|
||||
buf := bytes.NewBufferString("oi")
|
||||
var tests = []struct {
|
||||
input interface{}
|
||||
expected string
|
||||
}{
|
||||
{10, "10"},
|
||||
{10.3, "10"},
|
||||
{"oi", "oi"},
|
||||
{buf, "{}"},
|
||||
{unmarshable{}, "{}"},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
var env Env
|
||||
env.SetAuto("SOME", tt.input)
|
||||
if got := env.Get("SOME"); got != tt.expected {
|
||||
t.Errorf("Env.SetAuto(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMap(t *testing.T) {
|
||||
var tests = []struct {
|
||||
input []string
|
||||
expected map[string]string
|
||||
}{
|
||||
{[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, map[string]string{"PATH": "/usr/bin:/bin", "PYTHONPATH": "/usr/local"}},
|
||||
{nil, nil},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
env := Env(tt.input)
|
||||
got := env.Map()
|
||||
if !reflect.DeepEqual(got, tt.expected) {
|
||||
t.Errorf("Env.Map(): wrong result. Want %v. Got %v", tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type unmarshable struct {
|
||||
}
|
||||
|
||||
func (unmarshable) MarshalJSON() ([]byte, error) {
|
||||
return nil, errors.New("cannot marshal")
|
||||
}
|
|
@ -1,132 +0,0 @@
|
|||
// Copyright 2014 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestEventListeners(t *testing.T) {
|
||||
testEventListeners("TestEventListeners", t, httptest.NewServer, NewClient)
|
||||
}
|
||||
|
||||
func TestTLSEventListeners(t *testing.T) {
|
||||
testEventListeners("TestTLSEventListeners", t, func(handler http.Handler) *httptest.Server {
|
||||
server := httptest.NewUnstartedServer(handler)
|
||||
|
||||
cert, err := tls.LoadX509KeyPair("testing/data/server.pem", "testing/data/serverkey.pem")
|
||||
if err != nil {
|
||||
t.Fatalf("Error loading server key pair: %s", err)
|
||||
}
|
||||
|
||||
caCert, err := ioutil.ReadFile("testing/data/ca.pem")
|
||||
if err != nil {
|
||||
t.Fatalf("Error loading ca certificate: %s", err)
|
||||
}
|
||||
caPool := x509.NewCertPool()
|
||||
if !caPool.AppendCertsFromPEM(caCert) {
|
||||
t.Fatalf("Could not add ca certificate")
|
||||
}
|
||||
|
||||
server.TLS = &tls.Config{
|
||||
Certificates: []tls.Certificate{cert},
|
||||
RootCAs: caPool,
|
||||
}
|
||||
server.StartTLS()
|
||||
return server
|
||||
}, func(url string) (*Client, error) {
|
||||
return NewTLSClient(url, "testing/data/cert.pem", "testing/data/key.pem", "testing/data/ca.pem")
|
||||
})
|
||||
}
|
||||
|
||||
func testEventListeners(testName string, t *testing.T, buildServer func(http.Handler) *httptest.Server, buildClient func(string) (*Client, error)) {
|
||||
response := `{"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
|
||||
{"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924}
|
||||
{"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966}
|
||||
{"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970}
|
||||
`
|
||||
|
||||
server := buildServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
rsc := bufio.NewScanner(strings.NewReader(response))
|
||||
for rsc.Scan() {
|
||||
w.Write([]byte(rsc.Text()))
|
||||
w.(http.Flusher).Flush()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
client, err := buildClient(server.URL)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to create client: %s", err)
|
||||
}
|
||||
client.SkipServerVersionCheck = true
|
||||
|
||||
listener := make(chan *APIEvents, 10)
|
||||
defer func() {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
if err := client.RemoveEventListener(listener); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}()
|
||||
|
||||
err = client.AddEventListener(listener)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to add event listener: %s", err)
|
||||
}
|
||||
|
||||
timeout := time.After(1 * time.Second)
|
||||
var count int
|
||||
|
||||
for {
|
||||
select {
|
||||
case msg := <-listener:
|
||||
t.Logf("Received: %v", *msg)
|
||||
count++
|
||||
err = checkEvent(count, msg)
|
||||
if err != nil {
|
||||
t.Fatalf("Check event failed: %s", err)
|
||||
}
|
||||
if count == 4 {
|
||||
return
|
||||
}
|
||||
case <-timeout:
|
||||
t.Fatalf("%s timed out waiting on events", testName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkEvent(index int, event *APIEvents) error {
|
||||
if event.ID != "dfdf82bd3881" {
|
||||
return fmt.Errorf("event ID did not match. Expected dfdf82bd3881 got %s", event.ID)
|
||||
}
|
||||
if event.From != "base:latest" {
|
||||
return fmt.Errorf("event from did not match. Expected base:latest got %s", event.From)
|
||||
}
|
||||
var status string
|
||||
switch index {
|
||||
case 1:
|
||||
status = "create"
|
||||
case 2:
|
||||
status = "start"
|
||||
case 3:
|
||||
status = "stop"
|
||||
case 4:
|
||||
status = "destroy"
|
||||
}
|
||||
if event.Status != status {
|
||||
return fmt.Errorf("event status did not match. Expected %s got %s", status, event.Status)
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,168 +0,0 @@
|
|||
// Copyright 2014 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docker_test
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/fsouza/go-dockerclient"
|
||||
)
|
||||
|
||||
func ExampleClient_AttachToContainer() {
|
||||
client, err := docker.NewClient("http://localhost:4243")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
client.SkipServerVersionCheck = true
|
||||
// Reading logs from container a84849 and sending them to buf.
|
||||
var buf bytes.Buffer
|
||||
err = client.AttachToContainer(docker.AttachToContainerOptions{
|
||||
Container: "a84849",
|
||||
OutputStream: &buf,
|
||||
Logs: true,
|
||||
Stdout: true,
|
||||
Stderr: true,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Println(buf.String())
|
||||
buf.Reset()
|
||||
err = client.AttachToContainer(docker.AttachToContainerOptions{
|
||||
Container: "a84849",
|
||||
OutputStream: &buf,
|
||||
Stdout: true,
|
||||
Stream: true,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Println(buf.String())
|
||||
}
|
||||
|
||||
func ExampleClient_CopyFromContainer() {
|
||||
client, err := docker.NewClient("http://localhost:4243")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
cid := "a84849"
|
||||
var buf bytes.Buffer
|
||||
filename := "/tmp/output.txt"
|
||||
err = client.CopyFromContainer(docker.CopyFromContainerOptions{
|
||||
Container: cid,
|
||||
Resource: filename,
|
||||
OutputStream: &buf,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("Error while copying from %s: %s\n", cid, err)
|
||||
}
|
||||
content := new(bytes.Buffer)
|
||||
r := bytes.NewReader(buf.Bytes())
|
||||
tr := tar.NewReader(r)
|
||||
tr.Next()
|
||||
if err != nil && err != io.EOF {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if _, err := io.Copy(content, tr); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Println(buf.String())
|
||||
}
|
||||
|
||||
func ExampleClient_BuildImage() {
|
||||
client, err := docker.NewClient("http://localhost:4243")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
t := time.Now()
|
||||
inputbuf, outputbuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil)
|
||||
tr := tar.NewWriter(inputbuf)
|
||||
tr.WriteHeader(&tar.Header{Name: "Dockerfile", Size: 10, ModTime: t, AccessTime: t, ChangeTime: t})
|
||||
tr.Write([]byte("FROM base\n"))
|
||||
tr.Close()
|
||||
opts := docker.BuildImageOptions{
|
||||
Name: "test",
|
||||
InputStream: inputbuf,
|
||||
OutputStream: outputbuf,
|
||||
}
|
||||
if err := client.BuildImage(opts); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleClient_ListenEvents() {
|
||||
client, err := docker.NewClient("http://localhost:4243")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
listener := make(chan *docker.APIEvents)
|
||||
err = client.AddEventListener(listener)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
|
||||
err = client.RemoveEventListener(listener)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
}()
|
||||
|
||||
timeout := time.After(1 * time.Second)
|
||||
|
||||
for {
|
||||
select {
|
||||
case msg := <-listener:
|
||||
log.Println(msg)
|
||||
case <-timeout:
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func ExampleEnv_Map() {
|
||||
e := docker.Env([]string{"A=1", "B=2", "C=3"})
|
||||
envs := e.Map()
|
||||
for k, v := range envs {
|
||||
fmt.Printf("%s=%q\n", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleEnv_SetJSON() {
|
||||
type Person struct {
|
||||
Name string
|
||||
Age int
|
||||
}
|
||||
p := Person{Name: "Gopher", Age: 4}
|
||||
var e docker.Env
|
||||
err := e.SetJSON("person", p)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleEnv_GetJSON() {
|
||||
type Person struct {
|
||||
Name string
|
||||
Age int
|
||||
}
|
||||
p := Person{Name: "Gopher", Age: 4}
|
||||
var e docker.Env
|
||||
e.Set("person", `{"name":"Gopher","age":4}`)
|
||||
err := e.GetJSON("person", &p)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
|
@ -1,262 +0,0 @@
|
|||
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestExecCreate(t *testing.T) {
|
||||
jsonContainer := `{"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"}`
|
||||
var expected struct{ ID string }
|
||||
err := json.Unmarshal([]byte(jsonContainer), &expected)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
config := CreateExecOptions{
|
||||
Container: "test",
|
||||
AttachStdin: true,
|
||||
AttachStdout: true,
|
||||
AttachStderr: false,
|
||||
Tty: false,
|
||||
Cmd: []string{"touch", "/tmp/file"},
|
||||
User: "a-user",
|
||||
}
|
||||
execObj, err := client.CreateExec(config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expectedID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
|
||||
if execObj.ID != expectedID {
|
||||
t.Errorf("ExecCreate: wrong ID. Want %q. Got %q.", expectedID, execObj.ID)
|
||||
}
|
||||
req := fakeRT.requests[0]
|
||||
if req.Method != "POST" {
|
||||
t.Errorf("ExecCreate: wrong HTTP method. Want %q. Got %q.", "POST", req.Method)
|
||||
}
|
||||
expectedURL, _ := url.Parse(client.getURL("/containers/test/exec"))
|
||||
if gotPath := req.URL.Path; gotPath != expectedURL.Path {
|
||||
t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
|
||||
}
|
||||
var gotBody struct{ ID string }
|
||||
err = json.NewDecoder(req.Body).Decode(&gotBody)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecStartDetached(t *testing.T) {
|
||||
execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
|
||||
fakeRT := &FakeRoundTripper{status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
config := StartExecOptions{
|
||||
Detach: true,
|
||||
}
|
||||
err := client.StartExec(execID, config)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
req := fakeRT.requests[0]
|
||||
if req.Method != "POST" {
|
||||
t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method)
|
||||
}
|
||||
expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/start"))
|
||||
if gotPath := req.URL.Path; gotPath != expectedURL.Path {
|
||||
t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
|
||||
}
|
||||
t.Log(req.Body)
|
||||
var gotBody struct{ Detach bool }
|
||||
err = json.NewDecoder(req.Body).Decode(&gotBody)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !gotBody.Detach {
|
||||
t.Fatal("Expected Detach in StartExecOptions to be true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecStartAndAttach(t *testing.T) {
var reader = strings.NewReader("send value")
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5})
w.Write([]byte("hello"))
}))
defer server.Close()
client, _ := NewClient(server.URL)
client.SkipServerVersionCheck = true
var stdout, stderr bytes.Buffer
success := make(chan struct{})
execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
opts := StartExecOptions{
OutputStream: &stdout,
ErrorStream:  &stderr,
InputStream:  reader,
RawTerminal:  true,
Success:      success,
}
go func() {
if err := client.StartExec(execID, opts); err != nil {
t.Error(err)
}
}()
<-success
}
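The fake server above writes the 8-byte header `{1, 0, 0, 0, 0, 0, 0, 5}` followed by `hello`: Docker's multiplexed attach/exec framing, where byte 0 is the stream id (1 stdout, 2 stderr) and bytes 4-7 hold the big-endian payload length. `stdcopy.StdCopy` decodes this when `RawTerminal` is false; the sketch below is a simplified stand-in for illustration, not the library code:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// demux reads Docker stream frames: 1 byte stream id, 3 padding bytes,
// a 4-byte big-endian payload length, then the payload itself.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	header := make([]byte, 8)
	for {
		if _, err := io.ReadFull(r, header); err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		size := binary.BigEndian.Uint32(header[4:8])
		dst := stdout
		if header[0] == 2 {
			dst = stderr
		}
		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
			return err
		}
	}
}

func main() {
	// The same bytes the fake server writes: a stdout frame of length 5.
	frame := append([]byte{1, 0, 0, 0, 0, 0, 0, 5}, []byte("hello")...)
	var out, errOut bytes.Buffer
	if err := demux(bytes.NewReader(frame), &out, &errOut); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("stdout=%q stderr=%q\n", out.String(), errOut.String())
}
```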
func TestExecResize(t *testing.T) {
|
||||
execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"
|
||||
fakeRT := &FakeRoundTripper{status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
err := client.ResizeExecTTY(execID, 10, 20)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
req := fakeRT.requests[0]
|
||||
if req.Method != "POST" {
|
||||
t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method)
|
||||
}
|
||||
expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/resize?h=10&w=20"))
|
||||
if gotPath := req.URL.RequestURI(); gotPath != expectedURL.RequestURI() {
|
||||
t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecInspect(t *testing.T) {
|
||||
jsonExec := `{
|
||||
"ID": "32adfeeec34250f9530ce1dafd40c6233832315e065ea6b362d745e2f63cde0e",
|
||||
"Running": true,
|
||||
"ExitCode": 0,
|
||||
"ProcessConfig": {
|
||||
"privileged": false,
|
||||
"user": "",
|
||||
"tty": true,
|
||||
"entrypoint": "bash",
|
||||
"arguments": []
|
||||
},
|
||||
"OpenStdin": true,
|
||||
"OpenStderr": true,
|
||||
"OpenStdout": true,
|
||||
"Container": {
|
||||
"State": {
|
||||
"Running": true,
|
||||
"Paused": false,
|
||||
"Restarting": false,
|
||||
"OOMKilled": false,
|
||||
"Pid": 29392,
|
||||
"ExitCode": 0,
|
||||
"Error": "",
|
||||
"StartedAt": "2015-01-21T17:08:59.634662178Z",
|
||||
"FinishedAt": "0001-01-01T00:00:00Z"
|
||||
},
|
||||
"ID": "922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521",
|
||||
"Created": "2015-01-21T17:08:59.46407212Z",
|
||||
"Path": "/bin/bash",
|
||||
"Args": [
|
||||
"-lc",
|
||||
"tsuru_unit_agent http://192.168.50.4:8080 689b30e0ab3adce374346de2e72512138e0e8b75 gtest /var/lib/tsuru/start && tail -f /dev/null"
|
||||
],
|
||||
"Config": {
|
||||
"Hostname": "922cd0568714",
|
||||
"Domainname": "",
|
||||
"User": "ubuntu",
|
||||
"Memory": 0,
|
||||
"MemorySwap": 0,
|
||||
"CpuShares": 100,
|
||||
"Cpuset": "",
|
||||
"AttachStdin": false,
|
||||
"AttachStdout": false,
|
||||
"AttachStderr": false,
|
||||
"PortSpecs": null,
|
||||
"ExposedPorts": {
|
||||
"8888/tcp": {}
|
||||
},
|
||||
"Tty": false,
|
||||
"OpenStdin": false,
|
||||
"StdinOnce": false,
|
||||
"Env": [
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
],
|
||||
"Cmd": [
|
||||
"/bin/bash",
|
||||
"-lc",
|
||||
"tsuru_unit_agent http://192.168.50.4:8080 689b30e0ab3adce374346de2e72512138e0e8b75 gtest /var/lib/tsuru/start && tail -f /dev/null"
|
||||
],
|
||||
"Image": "tsuru/app-gtest",
|
||||
"Volumes": null,
|
||||
"WorkingDir": "",
|
||||
"Entrypoint": null,
|
||||
"NetworkDisabled": false,
|
||||
"MacAddress": "",
|
||||
"OnBuild": null
|
||||
},
|
||||
"Image": "a88060b8b54fde0f7168c86742d0ce83b80f3f10925d85c98fdad9ed00bef544",
|
||||
"NetworkSettings": {
|
||||
"IPAddress": "172.17.0.8",
|
||||
"IPPrefixLen": 16,
|
||||
"MacAddress": "02:42:ac:11:00:08",
|
||||
"LinkLocalIPv6Address": "fe80::42:acff:fe11:8",
|
||||
"LinkLocalIPv6PrefixLen": 64,
|
||||
"GlobalIPv6Address": "",
|
||||
"GlobalIPv6PrefixLen": 0,
|
||||
"Gateway": "172.17.42.1",
|
||||
"IPv6Gateway": "",
|
||||
"Bridge": "docker0",
|
||||
"PortMapping": null,
|
||||
"Ports": {
|
||||
"8888/tcp": [
|
||||
{
|
||||
"HostIp": "0.0.0.0",
|
||||
"HostPort": "49156"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"ResolvConfPath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/resolv.conf",
|
||||
"HostnamePath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/hostname",
|
||||
"HostsPath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/hosts",
|
||||
"Name": "/c7e43b72288ee9d0270a",
|
||||
"Driver": "aufs",
|
||||
"ExecDriver": "native-0.2",
|
||||
"MountLabel": "",
|
||||
"ProcessLabel": "",
|
||||
"AppArmorProfile": "",
|
||||
"RestartCount": 0,
|
||||
"UpdateDns": false,
|
||||
"Volumes": {},
|
||||
"VolumesRW": {}
|
||||
}
|
||||
}`
|
||||
var expected ExecInspect
|
||||
err := json.Unmarshal([]byte(jsonExec), &expected)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fakeRT := &FakeRoundTripper{message: jsonExec, status: http.StatusOK}
|
||||
client := newTestClient(fakeRT)
|
||||
expectedID := "32adfeeec34250f9530ce1dafd40c6233832315e065ea6b362d745e2f63cde0e"
|
||||
execObj, err := client.InspectExec(expectedID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(*execObj, expected) {
|
||||
t.Errorf("ExecInspect: Expected %#v. Got %#v.", expected, *execObj)
|
||||
}
|
||||
req := fakeRT.requests[0]
|
||||
if req.Method != "GET" {
|
||||
t.Errorf("ExecInspect: wrong HTTP method. Want %q. Got %q.", "GET", req.Method)
|
||||
}
|
||||
expectedURL, _ := url.Parse(client.getURL("/exec/" + expectedID + "/json"))
|
||||
if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path {
|
||||
t.Errorf("ExecInspect: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
|
||||
}
|
||||
}
|
|
@@ -1,26 +1,55 @@
# (Unreleased)
# 0.9.0 (Unreleased)

logrus/core: improve performance of text formatter by 40%
logrus/core: expose `LevelHooks` type
* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository

# 0.8.7

* logrus/core: fix possible race (#216)
* logrus/doc: small typo fixes and doc improvements


# 0.8.6

* hooks/raven: allow passing an initialized client

# 0.8.5

* logrus/core: revert #208

# 0.8.4

* formatter/text: fix data race (#218)

# 0.8.3

* logrus/core: fix entry log level (#208)
* logrus/core: improve performance of text formatter by 40%
* logrus/core: expose `LevelHooks` type
* logrus/core: add support for DragonflyBSD and NetBSD
* formatter/text: print structs more verbosely

# 0.8.2

logrus: fix more Fatal family functions
* logrus: fix more Fatal family functions

# 0.8.1

logrus: fix not exiting on `Fatalf` and `Fatalln`
* logrus: fix not exiting on `Fatalf` and `Fatalln`

# 0.8.0

logrus: defaults to stderr instead of stdout
hooks/sentry: add special field for `*http.Request`
formatter/text: ignore Windows for colors
* logrus: defaults to stderr instead of stdout
* hooks/sentry: add special field for `*http.Request`
* formatter/text: ignore Windows for colors

# 0.7.3

formatter/\*: allow configuration of timestamp layout
* formatter/\*: allow configuration of timestamp layout

# 0.7.2

formatter/text: Add configuration option for time format (#158)
* formatter/text: Add configuration option for time format (#158)

@@ -75,17 +75,12 @@ package main
import (
"os"
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
)

func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})

// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))

// Output to stderr instead of stdout, could also be a file.
log.SetOutput(os.Stderr)

@@ -182,13 +177,16 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
```go
import (
log "github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus/hooks/airbrake"
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
"log/syslog"
)

func init() {
log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))

// Use the Airbrake hook to report errors that have Error severity or above to
// an exception tracker. You can create custom hooks, see the Hooks section.
log.AddHook(airbrake.NewHook(123, "xyz", "production"))

hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
@@ -198,25 +196,30 @@ func init() {
}
}
```

Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).

| Hook | Description |
| ----- | ----------- |
| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. |
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |

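The README points at custom hooks without showing one in full. A self-contained sketch of the `logrus.Hook` interface (`Levels` plus `Fire`), using a made-up hook name; it assumes the vendored `Sirupsen/logrus` API at this revision:

```go
package main

import (
	"fmt"

	log "github.com/Sirupsen/logrus"
)

// notifyHook is a toy custom hook: it implements logrus.Hook by reporting
// which levels it wants (Levels) and reacting to matching entries (Fire).
type notifyHook struct{}

func (notifyHook) Levels() []log.Level {
	return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
}

func (notifyHook) Fire(entry *log.Entry) error {
	// A real hook would ship the entry to an external service here;
	// this one just prints the message and its fields.
	fmt.Printf("hook fired: %s %v\n", entry.Message, entry.Data)
	return nil
}

func main() {
	log.AddHook(notifyHook{})
	log.WithFields(log.Fields{"animal": "walrus"}).Error("something failed")
}
```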
#### Level logging

@@ -272,7 +275,7 @@ init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
log.SetFormatter(&logrus.JSONFormatter{})
log.SetFormatter(&log.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
log.SetFormatter(&log.TextFormatter{})
@@ -294,10 +297,10 @@ The built-in logging formatters are:
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`
* `logrus.JSONFormatter`. Logs fields as JSON.
* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).
* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.

```go
logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"})
logrus.SetFormatter(&logstash.LogstashFormatter{Type: “application_name"})
```

Third party logging formatters:
@@ -315,7 +318,7 @@ type MyJSONFormatter struct {

log.SetFormatter(new(MyJSONFormatter))

func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
// Note this doesn't include Time, Level and Message which are available on
// the Entry. Consult `godoc` on information about those fields or read the
// source of the official loggers.
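The hunk above only fixes the receiver name in the truncated `MyJSONFormatter` example. A complete, hedged version of such a formatter for reference; the `time`, `msg`, and `level` key names are illustrative choices, not mandated by the library:

```go
package main

import (
	"encoding/json"

	log "github.com/Sirupsen/logrus"
)

// MyJSONFormatter is a minimal custom formatter in the spirit of the README
// example: it serializes the entry's fields plus message and level as JSON.
type MyJSONFormatter struct{}

func (f *MyJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
	data := make(log.Fields, len(entry.Data)+3)
	for k, v := range entry.Data {
		data[k] = v
	}
	data["time"] = entry.Time.Format("2006-01-02T15:04:05Z07:00")
	data["msg"] = entry.Message
	data["level"] = entry.Level.String()
	serialized, err := json.Marshal(data)
	if err != nil {
		return nil, err
	}
	return append(serialized, '\n'), nil
}

func main() {
	log.SetFormatter(new(MyJSONFormatter))
	log.WithFields(log.Fields{"answer": 42}).Info("formatted by MyJSONFormatter")
}
```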
26 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go generated vendored Normal file
|
|||
/*
|
||||
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
|
||||
|
||||
|
||||
The simplest way to use Logrus is simply the package-level exported logger:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
log "github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.WithFields(log.Fields{
|
||||
"animal": "walrus",
|
||||
"number": 1,
|
||||
"size": 10,
|
||||
}).Info("A walrus appears")
|
||||
}
|
||||
|
||||
Output:
|
||||
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
|
||||
|
||||
For a full guide visit https://github.com/Sirupsen/logrus
|
||||
*/
|
||||
package logrus
|
|
@ -1,53 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestEntryPanicln(t *testing.T) {
|
||||
errBoom := fmt.Errorf("boom time")
|
||||
|
||||
defer func() {
|
||||
p := recover()
|
||||
assert.NotNil(t, p)
|
||||
|
||||
switch pVal := p.(type) {
|
||||
case *Entry:
|
||||
assert.Equal(t, "kaboom", pVal.Message)
|
||||
assert.Equal(t, errBoom, pVal.Data["err"])
|
||||
default:
|
||||
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
|
||||
}
|
||||
}()
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
entry := NewEntry(logger)
|
||||
entry.WithField("err", errBoom).Panicln("kaboom")
|
||||
}
|
||||
|
||||
func TestEntryPanicf(t *testing.T) {
|
||||
errBoom := fmt.Errorf("boom again")
|
||||
|
||||
defer func() {
|
||||
p := recover()
|
||||
assert.NotNil(t, p)
|
||||
|
||||
switch pVal := p.(type) {
|
||||
case *Entry:
|
||||
assert.Equal(t, "kaboom true", pVal.Message)
|
||||
assert.Equal(t, errBoom, pVal.Data["err"])
|
||||
default:
|
||||
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal)
|
||||
}
|
||||
}()
|
||||
|
||||
logger := New()
|
||||
logger.Out = &bytes.Buffer{}
|
||||
entry := NewEntry(logger)
|
||||
entry.WithField("err", errBoom).Panicf("kaboom %v", true)
|
||||
}
|
|
@ -1,98 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// smallFields is a small size data set for benchmarking
|
||||
var smallFields = Fields{
|
||||
"foo": "bar",
|
||||
"baz": "qux",
|
||||
"one": "two",
|
||||
"three": "four",
|
||||
}
|
||||
|
||||
// largeFields is a large size data set for benchmarking
|
||||
var largeFields = Fields{
|
||||
"foo": "bar",
|
||||
"baz": "qux",
|
||||
"one": "two",
|
||||
"three": "four",
|
||||
"five": "six",
|
||||
"seven": "eight",
|
||||
"nine": "ten",
|
||||
"eleven": "twelve",
|
||||
"thirteen": "fourteen",
|
||||
"fifteen": "sixteen",
|
||||
"seventeen": "eighteen",
|
||||
"nineteen": "twenty",
|
||||
"a": "b",
|
||||
"c": "d",
|
||||
"e": "f",
|
||||
"g": "h",
|
||||
"i": "j",
|
||||
"k": "l",
|
||||
"m": "n",
|
||||
"o": "p",
|
||||
"q": "r",
|
||||
"s": "t",
|
||||
"u": "v",
|
||||
"w": "x",
|
||||
"y": "z",
|
||||
"this": "will",
|
||||
"make": "thirty",
|
||||
"entries": "yeah",
|
||||
}
|
||||
|
||||
var errorFields = Fields{
|
||||
"foo": fmt.Errorf("bar"),
|
||||
"baz": fmt.Errorf("qux"),
|
||||
}
|
||||
|
||||
func BenchmarkErrorTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallColoredTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeColoredTextFormatter(b *testing.B) {
|
||||
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields)
|
||||
}
|
||||
|
||||
func BenchmarkSmallJSONFormatter(b *testing.B) {
|
||||
doBenchmark(b, &JSONFormatter{}, smallFields)
|
||||
}
|
||||
|
||||
func BenchmarkLargeJSONFormatter(b *testing.B) {
|
||||
doBenchmark(b, &JSONFormatter{}, largeFields)
|
||||
}
|
||||
|
||||
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) {
|
||||
entry := &Entry{
|
||||
Time: time.Time{},
|
||||
Level: InfoLevel,
|
||||
Message: "message",
|
||||
Data: fields,
|
||||
}
|
||||
var d []byte
|
||||
var err error
|
||||
for i := 0; i < b.N; i++ {
|
||||
d, err = formatter.Format(entry)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.SetBytes(int64(len(d)))
|
||||
}
|
||||
}
|
|
@ -1,122 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type TestHook struct {
|
||||
Fired bool
|
||||
}
|
||||
|
||||
func (hook *TestHook) Fire(entry *Entry) error {
|
||||
hook.Fired = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *TestHook) Levels() []Level {
|
||||
return []Level{
|
||||
DebugLevel,
|
||||
InfoLevel,
|
||||
WarnLevel,
|
||||
ErrorLevel,
|
||||
FatalLevel,
|
||||
PanicLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHookFires(t *testing.T) {
|
||||
hook := new(TestHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
assert.Equal(t, hook.Fired, false)
|
||||
|
||||
log.Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, true)
|
||||
})
|
||||
}
|
||||
|
||||
type ModifyHook struct {
|
||||
}
|
||||
|
||||
func (hook *ModifyHook) Fire(entry *Entry) error {
|
||||
entry.Data["wow"] = "whale"
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *ModifyHook) Levels() []Level {
|
||||
return []Level{
|
||||
DebugLevel,
|
||||
InfoLevel,
|
||||
WarnLevel,
|
||||
ErrorLevel,
|
||||
FatalLevel,
|
||||
PanicLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestHookCanModifyEntry(t *testing.T) {
|
||||
hook := new(ModifyHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.WithField("wow", "elephant").Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["wow"], "whale")
|
||||
})
|
||||
}
|
||||
|
||||
func TestCanFireMultipleHooks(t *testing.T) {
|
||||
hook1 := new(ModifyHook)
|
||||
hook2 := new(TestHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook1)
|
||||
log.Hooks.Add(hook2)
|
||||
|
||||
log.WithField("wow", "elephant").Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["wow"], "whale")
|
||||
assert.Equal(t, hook2.Fired, true)
|
||||
})
|
||||
}
|
||||
|
||||
type ErrorHook struct {
|
||||
Fired bool
|
||||
}
|
||||
|
||||
func (hook *ErrorHook) Fire(entry *Entry) error {
|
||||
hook.Fired = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hook *ErrorHook) Levels() []Level {
|
||||
return []Level{
|
||||
ErrorLevel,
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorHookShouldntFireOnInfo(t *testing.T) {
|
||||
hook := new(ErrorHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, false)
|
||||
})
|
||||
}
|
||||
|
||||
func TestErrorHookShouldFireOnError(t *testing.T) {
|
||||
hook := new(ErrorHook)
|
||||
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Hooks.Add(hook)
|
||||
log.Error("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, hook.Fired, true)
|
||||
})
|
||||
}
|
|
@ -1,120 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestErrorNotLost(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("error", errors.New("wild walrus")))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["error"] != "wild walrus" {
|
||||
t.Fatal("Error field not set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestErrorNotLostOnFieldNotNamedError(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("omg", errors.New("wild walrus")))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["omg"] != "wild walrus" {
|
||||
t.Fatal("Error field not set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldClashWithTime(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("time", "right now!"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["fields.time"] != "right now!" {
|
||||
t.Fatal("fields.time not set to original time field")
|
||||
}
|
||||
|
||||
if entry["time"] != "0001-01-01T00:00:00Z" {
|
||||
t.Fatal("time field not set to current time, was: ", entry["time"])
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldClashWithMsg(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("msg", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["fields.msg"] != "something" {
|
||||
t.Fatal("fields.msg not set to original msg field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFieldClashWithLevel(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
entry := make(map[string]interface{})
|
||||
err = json.Unmarshal(b, &entry)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to unmarshal formatted entry: ", err)
|
||||
}
|
||||
|
||||
if entry["fields.level"] != "something" {
|
||||
t.Fatal("fields.level not set to original level field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJSONEntryEndsWithNewline(t *testing.T) {
|
||||
formatter := &JSONFormatter{}
|
||||
|
||||
b, err := formatter.Format(WithField("level", "something"))
|
||||
if err != nil {
|
||||
t.Fatal("Unable to format entry: ", err)
|
||||
}
|
||||
|
||||
if b[len(b)-1] != '\n' {
|
||||
t.Fatal("Expected JSON log entry to end with a newline")
|
||||
}
|
||||
}
|
|
@ -1,301 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) {
|
||||
var buffer bytes.Buffer
|
||||
var fields Fields
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = new(JSONFormatter)
|
||||
|
||||
log(logger)
|
||||
|
||||
err := json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assertions(fields)
|
||||
}
|
||||
|
||||
func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) {
|
||||
var buffer bytes.Buffer
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = &TextFormatter{
|
||||
DisableColors: true,
|
||||
}
|
||||
|
||||
log(logger)
|
||||
|
||||
fields := make(map[string]string)
|
||||
for _, kv := range strings.Split(buffer.String(), " ") {
|
||||
if !strings.Contains(kv, "=") {
|
||||
continue
|
||||
}
|
||||
kvArr := strings.Split(kv, "=")
|
||||
key := strings.TrimSpace(kvArr[0])
|
||||
val := kvArr[1]
|
||||
if kvArr[1][0] == '"' {
|
||||
var err error
|
||||
val, err = strconv.Unquote(val)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
fields[key] = val
|
||||
}
|
||||
assertions(fields)
|
||||
}
|
||||
|
||||
func TestPrint(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Print("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["level"], "info")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfo(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["level"], "info")
|
||||
})
|
||||
}
|
||||
|
||||
func TestWarn(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Warn("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["level"], "warning")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln("test", "test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test test")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln("test", 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test 10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln(10, 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "10 10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Infoln(10, 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "10 10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Info("test", 10)
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test10")
|
||||
})
|
||||
}
|
||||
|
||||
func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.Info("test", "test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "testtest")
|
||||
})
|
||||
}
|
||||
|
||||
func TestWithFieldsShouldAllowAssignments(t *testing.T) {
|
||||
var buffer bytes.Buffer
|
||||
var fields Fields
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = new(JSONFormatter)
|
||||
|
||||
localLog := logger.WithFields(Fields{
|
||||
"key1": "value1",
|
||||
})
|
||||
|
||||
localLog.WithField("key2", "value2").Info("test")
|
||||
err := json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.Equal(t, "value2", fields["key2"])
|
||||
assert.Equal(t, "value1", fields["key1"])
|
||||
|
||||
buffer = bytes.Buffer{}
|
||||
fields = Fields{}
|
||||
localLog.Info("test")
|
||||
err = json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.Nil(t, err)
|
||||
|
||||
_, ok := fields["key2"]
|
||||
assert.Equal(t, false, ok)
|
||||
assert.Equal(t, "value1", fields["key1"])
|
||||
}
|
||||
|
||||
func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("msg", "hello").Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
})
|
||||
}
|
||||
|
||||
func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("msg", "hello").Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["msg"], "test")
|
||||
assert.Equal(t, fields["fields.msg"], "hello")
|
||||
})
|
||||
}
|
||||
|
||||
func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("time", "hello").Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["fields.time"], "hello")
|
||||
})
|
||||
}
|
||||
|
||||
func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) {
|
||||
LogAndAssertJSON(t, func(log *Logger) {
|
||||
log.WithField("level", 1).Info("test")
|
||||
}, func(fields Fields) {
|
||||
assert.Equal(t, fields["level"], "info")
|
||||
assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
|
||||
})
|
||||
}
|
||||
|
||||
func TestDefaultFieldsAreNotPrefixed(t *testing.T) {
|
||||
LogAndAssertText(t, func(log *Logger) {
|
||||
ll := log.WithField("herp", "derp")
|
||||
ll.Info("hello")
|
||||
ll.Info("bye")
|
||||
}, func(fields map[string]string) {
|
||||
for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} {
|
||||
if _, ok := fields[fieldName]; ok {
|
||||
t.Fatalf("should not have prefixed %q: %v", fieldName, fields)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) {
|
||||
|
||||
var buffer bytes.Buffer
|
||||
var fields Fields
|
||||
|
||||
logger := New()
|
||||
logger.Out = &buffer
|
||||
logger.Formatter = new(JSONFormatter)
|
||||
|
||||
llog := logger.WithField("context", "eating raw fish")
|
||||
|
||||
llog.Info("looks delicious")
|
||||
|
||||
err := json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.NoError(t, err, "should have decoded first message")
|
||||
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
|
||||
assert.Equal(t, fields["msg"], "looks delicious")
|
||||
assert.Equal(t, fields["context"], "eating raw fish")
|
||||
|
||||
buffer.Reset()
|
||||
|
||||
llog.Warn("omg it is!")
|
||||
|
||||
err = json.Unmarshal(buffer.Bytes(), &fields)
|
||||
assert.NoError(t, err, "should have decoded second message")
|
||||
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields")
|
||||
assert.Equal(t, fields["msg"], "omg it is!")
|
||||
assert.Equal(t, fields["context"], "eating raw fish")
|
||||
assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry")
|
||||
|
||||
}
|
||||
|
||||
func TestConvertLevelToString(t *testing.T) {
|
||||
assert.Equal(t, "debug", DebugLevel.String())
|
||||
assert.Equal(t, "info", InfoLevel.String())
|
||||
assert.Equal(t, "warning", WarnLevel.String())
|
||||
assert.Equal(t, "error", ErrorLevel.String())
|
||||
assert.Equal(t, "fatal", FatalLevel.String())
|
||||
assert.Equal(t, "panic", PanicLevel.String())
|
||||
}
|
||||
|
||||
func TestParseLevel(t *testing.T) {
|
||||
l, err := ParseLevel("panic")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, PanicLevel, l)
|
||||
|
||||
l, err = ParseLevel("fatal")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, FatalLevel, l)
|
||||
|
||||
l, err = ParseLevel("error")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, ErrorLevel, l)
|
||||
|
||||
l, err = ParseLevel("warn")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, WarnLevel, l)
|
||||
|
||||
l, err = ParseLevel("warning")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, WarnLevel, l)
|
||||
|
||||
l, err = ParseLevel("info")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, InfoLevel, l)
|
||||
|
||||
l, err = ParseLevel("debug")
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, DebugLevel, l)
|
||||
|
||||
l, err = ParseLevel("invalid")
|
||||
assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error())
|
||||
}
|
||||
|
||||
func TestGetSetLevelRace(t *testing.T) {
|
||||
wg := sync.WaitGroup{}
|
||||
for i := 0; i < 100; i++ {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
if i%2 == 0 {
|
||||
SetLevel(InfoLevel)
|
||||
} else {
|
||||
GetLevel()
|
||||
}
|
||||
}(i)
|
||||
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
/*
|
||||
Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
|
||||
*/
|
||||
package logrus
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const ioctlReadTermios = syscall.TIOCGETA
|
||||
|
||||
type Termios struct {
|
||||
Iflag uint32
|
||||
Oflag uint32
|
||||
Cflag uint32
|
||||
Lflag uint32
|
||||
Cc [20]uint8
|
||||
Ispeed uint32
|
||||
Ospeed uint32
|
||||
}
|
|
@ -3,7 +3,7 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux darwin freebsd openbsd
|
||||
// +build linux darwin freebsd openbsd netbsd dragonfly
|
||||
|
||||
package logrus
|
||||
|
||||
|
|
|
@ -1,7 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import "syscall"
|
||||
|
||||
const ioctlReadTermios = syscall.TIOCGETA
|
||||
|
||||
type Termios syscall.Termios
|
|
@ -73,17 +73,20 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
|||
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
|
||||
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
|
||||
|
||||
if f.TimestampFormat == "" {
|
||||
f.TimestampFormat = DefaultTimestampFormat
|
||||
timestampFormat := f.TimestampFormat
|
||||
if timestampFormat == "" {
|
||||
timestampFormat = DefaultTimestampFormat
|
||||
}
|
||||
if isColored {
|
||||
f.printColored(b, entry, keys)
|
||||
f.printColored(b, entry, keys, timestampFormat)
|
||||
} else {
|
||||
if !f.DisableTimestamp {
|
||||
f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat))
|
||||
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
|
||||
}
|
||||
f.appendKeyValue(b, "level", entry.Level.String())
|
||||
f.appendKeyValue(b, "msg", entry.Message)
|
||||
if entry.Message != "" {
|
||||
f.appendKeyValue(b, "msg", entry.Message)
|
||||
}
|
||||
for _, key := range keys {
|
||||
f.appendKeyValue(b, key, entry.Data[key])
|
||||
}
|
||||
|
@ -93,7 +96,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
|||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) {
|
||||
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
|
||||
var levelColor int
|
||||
switch entry.Level {
|
||||
case DebugLevel:
|
||||
|
@ -111,11 +114,11 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
|
|||
if !f.FullTimestamp {
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
|
||||
} else {
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message)
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
|
||||
}
|
||||
for _, k := range keys {
|
||||
v := entry.Data[k]
|
||||
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
|
||||
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,61 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestQuoting(t *testing.T) {
|
||||
tf := &TextFormatter{DisableColors: true}
|
||||
|
||||
checkQuoting := func(q bool, value interface{}) {
|
||||
b, _ := tf.Format(WithField("test", value))
|
||||
idx := bytes.Index(b, ([]byte)("test="))
|
||||
cont := bytes.Contains(b[idx+5:], []byte{'"'})
|
||||
if cont != q {
|
||||
if q {
|
||||
t.Errorf("quoting expected for: %#v", value)
|
||||
} else {
|
||||
t.Errorf("quoting not expected for: %#v", value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
checkQuoting(false, "abcd")
|
||||
checkQuoting(false, "v1.0")
|
||||
checkQuoting(false, "1234567890")
|
||||
checkQuoting(true, "/foobar")
|
||||
checkQuoting(true, "x y")
|
||||
checkQuoting(true, "x,y")
|
||||
checkQuoting(false, errors.New("invalid"))
|
||||
checkQuoting(true, errors.New("invalid argument"))
|
||||
}
|
||||
|
||||
func TestTimestampFormat(t *testing.T) {
|
||||
checkTimeStr := func(format string) {
|
||||
customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format}
|
||||
customStr, _ := customFormatter.Format(WithField("test", "test"))
|
||||
timeStart := bytes.Index(customStr, ([]byte)("time="))
|
||||
timeEnd := bytes.Index(customStr, ([]byte)("level="))
|
||||
timeStr := customStr[timeStart+5 : timeEnd-1]
|
||||
if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' {
|
||||
timeStr = timeStr[1 : len(timeStr)-1]
|
||||
}
|
||||
if format == "" {
|
||||
format = time.RFC3339
|
||||
}
|
||||
_, e := time.Parse(format, (string)(timeStr))
|
||||
if e != nil {
|
||||
t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e)
|
||||
}
|
||||
}
|
||||
|
||||
checkTimeStr("2006-01-02T15:04:05.000000000Z07:00")
|
||||
checkTimeStr("Mon Jan _2 15:04:05 2006")
|
||||
checkTimeStr("")
|
||||
}
|
||||
|
||||
// TODO add tests for sorting etc., this requires a parser for the text
|
||||
// formatter output.
|
259 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode/errors.go (new file, generated, vendored)
|
@ -0,0 +1,259 @@
|
|||
package errcode
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ErrorCoder is the base interface for ErrorCode and Error allowing
|
||||
// users of each to just call ErrorCode to get the real ID of each
|
||||
type ErrorCoder interface {
|
||||
ErrorCode() ErrorCode
|
||||
}
|
||||
|
||||
// ErrorCode represents the error type. The errors are serialized via strings
|
||||
// and the integer format may change and should *never* be exported.
|
||||
type ErrorCode int
|
||||
|
||||
var _ error = ErrorCode(0)
|
||||
|
||||
// ErrorCode just returns itself
|
||||
func (ec ErrorCode) ErrorCode() ErrorCode {
|
||||
return ec
|
||||
}
|
||||
|
||||
// Error returns the ID/Value
|
||||
func (ec ErrorCode) Error() string {
|
||||
return ec.Descriptor().Value
|
||||
}
|
||||
|
||||
// Descriptor returns the descriptor for the error code.
|
||||
func (ec ErrorCode) Descriptor() ErrorDescriptor {
|
||||
d, ok := errorCodeToDescriptors[ec]
|
||||
|
||||
if !ok {
|
||||
return ErrorCodeUnknown.Descriptor()
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
// String returns the canonical identifier for this error code.
|
||||
func (ec ErrorCode) String() string {
|
||||
return ec.Descriptor().Value
|
||||
}
|
||||
|
||||
// Message returns the human-readable error message for this error code.
|
||||
func (ec ErrorCode) Message() string {
|
||||
return ec.Descriptor().Message
|
||||
}
|
||||
|
||||
// MarshalText encodes the receiver into UTF-8-encoded text and returns the
|
||||
// result.
|
||||
func (ec ErrorCode) MarshalText() (text []byte, err error) {
|
||||
return []byte(ec.String()), nil
|
||||
}
|
||||
|
||||
// UnmarshalText decodes the form generated by MarshalText.
|
||||
func (ec *ErrorCode) UnmarshalText(text []byte) error {
|
||||
desc, ok := idToDescriptors[string(text)]
|
||||
|
||||
if !ok {
|
||||
desc = ErrorCodeUnknown.Descriptor()
|
||||
}
|
||||
|
||||
*ec = desc.Code
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithDetail creates a new Error struct based on the passed-in info and
|
||||
// set the Detail property appropriately
|
||||
func (ec ErrorCode) WithDetail(detail interface{}) Error {
|
||||
return Error{
|
||||
Code: ec,
|
||||
Message: ec.Message(),
|
||||
}.WithDetail(detail)
|
||||
}
|
||||
|
||||
// WithArgs creates a new Error struct and sets the Args slice
|
||||
func (ec ErrorCode) WithArgs(args ...interface{}) Error {
|
||||
return Error{
|
||||
Code: ec,
|
||||
Message: ec.Message(),
|
||||
}.WithArgs(args...)
|
||||
}
|
||||
|
||||
// Error provides a wrapper around ErrorCode with extra Details provided.
|
||||
type Error struct {
|
||||
Code ErrorCode `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Detail interface{} `json:"detail,omitempty"`
|
||||
|
||||
// TODO(duglin): See if we need an "args" property so we can do the
|
||||
// variable substitution right before showing the message to the user
|
||||
}
|
||||
|
||||
var _ error = Error{}
|
||||
|
||||
// ErrorCode returns the ID/Value of this Error
|
||||
func (e Error) ErrorCode() ErrorCode {
|
||||
return e.Code
|
||||
}
|
||||
|
||||
// Error returns a human readable representation of the error.
|
||||
func (e Error) Error() string {
|
||||
return fmt.Sprintf("%s: %s",
|
||||
strings.ToLower(strings.Replace(e.Code.String(), "_", " ", -1)),
|
||||
e.Message)
|
||||
}
|
||||
|
||||
// WithDetail will return a new Error, based on the current one, but with
|
||||
// some Detail info added
|
||||
func (e Error) WithDetail(detail interface{}) Error {
|
||||
return Error{
|
||||
Code: e.Code,
|
||||
Message: e.Message,
|
||||
Detail: detail,
|
||||
}
|
||||
}
|
||||
|
||||
// WithArgs uses the passed-in list of interface{} as the substitution
|
||||
// variables in the Error's Message string, but returns a new Error
|
||||
func (e Error) WithArgs(args ...interface{}) Error {
|
||||
return Error{
|
||||
Code: e.Code,
|
||||
Message: fmt.Sprintf(e.Code.Message(), args...),
|
||||
Detail: e.Detail,
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorDescriptor provides relevant information about a given error code.
|
||||
type ErrorDescriptor struct {
|
||||
// Code is the error code that this descriptor describes.
|
||||
Code ErrorCode
|
||||
|
||||
// Value provides a unique string key, often capitalized with
|
||||
// underscores, to identify the error code. This value is used as the
|
||||
// keyed value when serializing api errors.
|
||||
Value string
|
||||
|
||||
// Message is a short, human-readable description of the error condition
|
||||
// included in API responses.
|
||||
Message string
|
||||
|
||||
// Description provides a complete account of the error's purpose, suitable
|
||||
// for use in documentation.
|
||||
Description string
|
||||
|
||||
// HTTPStatusCode provides the http status code that is associated with
|
||||
// this error condition.
|
||||
HTTPStatusCode int
|
||||
}
|
||||
|
||||
// ParseErrorCode returns the value by the string error code.
|
||||
// `ErrorCodeUnknown` will be returned if the error is not known.
|
||||
func ParseErrorCode(value string) ErrorCode {
|
||||
ed, ok := idToDescriptors[value]
|
||||
if ok {
|
||||
return ed.Code
|
||||
}
|
||||
|
||||
return ErrorCodeUnknown
|
||||
}
|
||||
|
||||
// Errors provides the envelope for multiple errors and a few sugar methods
|
||||
// for use within the application.
|
||||
type Errors []error
|
||||
|
||||
var _ error = Errors{}
|
||||
|
||||
func (errs Errors) Error() string {
|
||||
switch len(errs) {
|
||||
case 0:
|
||||
return "<nil>"
|
||||
case 1:
|
||||
return errs[0].Error()
|
||||
default:
|
||||
msg := "errors:\n"
|
||||
for _, err := range errs {
|
||||
msg += err.Error() + "\n"
|
||||
}
|
||||
return msg
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the current number of errors.
|
||||
func (errs Errors) Len() int {
|
||||
return len(errs)
|
||||
}
|
||||
|
||||
// MarshalJSON converts slice of error, ErrorCode or Error into a
|
||||
// slice of Error - then serializes
|
||||
func (errs Errors) MarshalJSON() ([]byte, error) {
|
||||
var tmpErrs struct {
|
||||
Errors []Error `json:"errors,omitempty"`
|
||||
}
|
||||
|
||||
for _, daErr := range errs {
|
||||
var err Error
|
||||
|
||||
switch daErr.(type) {
|
||||
case ErrorCode:
|
||||
err = daErr.(ErrorCode).WithDetail(nil)
|
||||
case Error:
|
||||
err = daErr.(Error)
|
||||
default:
|
||||
err = ErrorCodeUnknown.WithDetail(daErr)
|
||||
|
||||
}
|
||||
|
||||
// If the Error struct was set up and they forgot to set the
|
||||
// Message field (meaning it's "") then grab it from the ErrCode
|
||||
msg := err.Message
|
||||
if msg == "" {
|
||||
msg = err.Code.Message()
|
||||
}
|
||||
|
||||
tmpErrs.Errors = append(tmpErrs.Errors, Error{
|
||||
Code: err.Code,
|
||||
Message: msg,
|
||||
Detail: err.Detail,
|
||||
})
|
||||
}
|
||||
|
||||
return json.Marshal(tmpErrs)
|
||||
}
|
||||
|
||||
// UnmarshalJSON deserializes []Error and then converts it into slice of
|
||||
// Error or ErrorCode
|
||||
func (errs *Errors) UnmarshalJSON(data []byte) error {
|
||||
var tmpErrs struct {
|
||||
Errors []Error
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(data, &tmpErrs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var newErrs Errors
|
||||
for _, daErr := range tmpErrs.Errors {
|
||||
// If Message is empty or exactly matches the Code's message string
|
||||
// then just use the Code, no need for a full Error struct
|
||||
if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
|
||||
// Errors without details get converted to ErrorCode
|
||||
newErrs = append(newErrs, daErr.Code)
|
||||
} else {
|
||||
// Errors with details are untouched
|
||||
newErrs = append(newErrs, Error{
|
||||
Code: daErr.Code,
|
||||
Message: daErr.Message,
|
||||
Detail: daErr.Detail,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
*errs = newErrs
|
||||
return nil
|
||||
}
|
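For context, a small sketch (not part of this diff) of how the Errors envelope above serializes; the import path matches the vendored location used elsewhere in this commit, and the chosen error codes are just examples.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode"
)

func main() {
	errs := errcode.Errors{
		errcode.ErrorCodeUnsupported,                        // a bare ErrorCode
		errcode.ErrorCodeUnknown.WithDetail("disk on fire"), // an Error with detail
	}

	// MarshalJSON wraps everything as {"errors":[{"code":...,"message":...},...]}.
	out, err := json.Marshal(errs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```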
44 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode/handler.go (new file, generated, vendored)
|
@ -0,0 +1,44 @@
|
|||
package errcode
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
|
||||
// and sets the content-type header to 'application/json'. It will handle
|
||||
// ErrorCoder and Errors, and if necessary will create an envelope.
|
||||
func ServeJSON(w http.ResponseWriter, err error) error {
|
||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
var sc int
|
||||
|
||||
switch errs := err.(type) {
|
||||
case Errors:
|
||||
if len(errs) < 1 {
|
||||
break
|
||||
}
|
||||
|
||||
if err, ok := errs[0].(ErrorCoder); ok {
|
||||
sc = err.ErrorCode().Descriptor().HTTPStatusCode
|
||||
}
|
||||
case ErrorCoder:
|
||||
sc = errs.ErrorCode().Descriptor().HTTPStatusCode
|
||||
err = Errors{err} // create an envelope.
|
||||
default:
|
||||
// We just have an unhandled error type, so just place in an envelope
|
||||
// and move along.
|
||||
err = Errors{err}
|
||||
}
|
||||
|
||||
if sc == 0 {
|
||||
sc = http.StatusInternalServerError
|
||||
}
|
||||
|
||||
w.WriteHeader(sc)
|
||||
|
||||
if err := json.NewEncoder(w).Encode(err); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
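For context, a minimal sketch (not part of this diff) of how the ServeJSON helper above might be called from an HTTP handler; the handler itself and the choice of ErrorCodeUnauthorized are illustrative assumptions.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode"
)

// handler pretends the request was not authorized. ServeJSON picks the HTTP
// status from the error's descriptor (401 for ErrorCodeUnauthorized) and
// writes the {"errors": [...]} envelope to the response body.
func handler(w http.ResponseWriter, r *http.Request) {
	err := errcode.ErrorCodeUnauthorized.WithDetail(r.URL.Path)
	if serveErr := errcode.ServeJSON(w, err); serveErr != nil {
		// Encoding failed after the status line was written; just report it.
		fmt.Println("failed to serve error:", serveErr)
	}
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe(":8080", nil)
}
```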
128 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode/register.go (new file, generated, vendored)
|
@ -0,0 +1,128 @@
|
|||
package errcode
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
|
||||
idToDescriptors = map[string]ErrorDescriptor{}
|
||||
groupToDescriptors = map[string][]ErrorDescriptor{}
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrorCodeUnknown is a generic error that can be used as a last
|
||||
// resort if there is no situation-specific error message that can be used
|
||||
ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
|
||||
Value: "UNKNOWN",
|
||||
Message: "unknown error",
|
||||
Description: `Generic error returned when the error does not have an
|
||||
API classification.`,
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeUnsupported is returned when an operation is not supported.
|
||||
ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{
|
||||
Value: "UNSUPPORTED",
|
||||
Message: "The operation is unsupported.",
|
||||
Description: `The operation was unsupported due to a missing
|
||||
implementation or invalid set of parameters.`,
|
||||
HTTPStatusCode: http.StatusMethodNotAllowed,
|
||||
})
|
||||
|
||||
// ErrorCodeUnauthorized is returned if a request requires
|
||||
// authentication.
|
||||
ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{
|
||||
Value: "UNAUTHORIZED",
|
||||
Message: "authentication required",
|
||||
Description: `The access controller was unable to authenticate
|
||||
the client. Often this will be accompanied by a
|
||||
Www-Authenticate HTTP response header indicating how to
|
||||
authenticate.`,
|
||||
HTTPStatusCode: http.StatusUnauthorized,
|
||||
})
|
||||
|
||||
// ErrorCodeDenied is returned if a client does not have sufficient
|
||||
// permission to perform an action.
|
||||
ErrorCodeDenied = Register("errcode", ErrorDescriptor{
|
||||
Value: "DENIED",
|
||||
Message: "requested access to the resource is denied",
|
||||
Description: `The access controller denied access for the
|
||||
operation on a resource.`,
|
||||
HTTPStatusCode: http.StatusForbidden,
|
||||
})
|
||||
|
||||
// ErrorCodeUnavailable provides a common error to report unavailability
|
||||
// of a service or endpoint.
|
||||
ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
|
||||
Value: "UNAVAILABLE",
|
||||
Message: "service unavailable",
|
||||
Description: "Returned when a service is not available",
|
||||
HTTPStatusCode: http.StatusServiceUnavailable,
|
||||
})
|
||||
)
|
||||
|
||||
var nextCode = 1000
|
||||
var registerLock sync.Mutex
|
||||
|
||||
// Register will make the passed-in error known to the environment and
|
||||
// return a new ErrorCode
|
||||
func Register(group string, descriptor ErrorDescriptor) ErrorCode {
|
||||
registerLock.Lock()
|
||||
defer registerLock.Unlock()
|
||||
|
||||
descriptor.Code = ErrorCode(nextCode)
|
||||
|
||||
if _, ok := idToDescriptors[descriptor.Value]; ok {
|
||||
panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
|
||||
}
|
||||
if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
|
||||
panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
|
||||
}
|
||||
|
||||
groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
|
||||
errorCodeToDescriptors[descriptor.Code] = descriptor
|
||||
idToDescriptors[descriptor.Value] = descriptor
|
||||
|
||||
nextCode++
|
||||
return descriptor.Code
|
||||
}
|
||||
|
||||
type byValue []ErrorDescriptor
|
||||
|
||||
func (a byValue) Len() int { return len(a) }
|
||||
func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
||||
|
||||
// GetGroupNames returns the list of Error group names that are registered
|
||||
func GetGroupNames() []string {
|
||||
keys := []string{}
|
||||
|
||||
for k := range groupToDescriptors {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
// GetErrorCodeGroup returns the named group of error descriptors
|
||||
func GetErrorCodeGroup(name string) []ErrorDescriptor {
|
||||
desc := groupToDescriptors[name]
|
||||
sort.Sort(byValue(desc))
|
||||
return desc
|
||||
}
|
||||
|
||||
// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
|
||||
// registered, irrespective of what group they're in
|
||||
func GetErrorAllDescriptors() []ErrorDescriptor {
|
||||
result := []ErrorDescriptor{}
|
||||
|
||||
for _, group := range GetGroupNames() {
|
||||
result = append(result, GetErrorCodeGroup(group)...)
|
||||
}
|
||||
sort.Sort(byValue(result))
|
||||
return result
|
||||
}
|
58 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/README.md (new file, generated, vendored)
|
@@ -0,0 +1,58 @@
Docker 'errors' package
=======================

This package contains all of the error messages generated by the Docker
engine that might be exposed via the Docker engine's REST API.

Each top-level engine package will have its own file in this directory
so that there's a clear grouping of errors, instead of just one big
file. The errors for each package are defined here instead of within
their respective package structure so that Docker CLI code that may need
to import these error definition files will not need to know or understand
the engine's package/directory structure. In other words, all they should
need to do is import `.../docker/errors` and they will automatically
pick up all Docker engine defined errors. This also gives the engine
developers the freedom to change the engine packaging structure (e.g. to
CRUD packages) without worrying about breaking existing clients.

These errors are defined using the 'errcode' package. The `errcode` package
allows each error to be typed and to include all information necessary to
have further processing done on it if necessary. In particular, each error
includes:

* Value - a unique string (in all caps) associated with this error.
Typically, this string is the same as the variable name of the error
(without the `ErrorCode` text) but in all caps.

* Message - the human-readable sentence that will be displayed for this
error. It can contain '%s' substitutions that allow the code generating
the error to specify values that will be inserted in the string prior to
being displayed to the end-user. The `WithArgs()` function can be used to
specify the insertion strings. Note that the evaluation of the strings will
be done at the time `WithArgs()` is called.

* Description - additional human-readable text to further explain the
circumstances of the error situation.

* HTTPStatusCode - when the error is returned back to a CLI, this value
will be used to populate the HTTP status code. If not present, the default
value will be `StatusInternalServerError`, 500.

Not all errors generated within the engine's executable will be propagated
back to the engine's API layer. For example, it is expected that errors
generated by vendored code (under `docker/vendor`) and packaged code
(under `docker/pkg`) will be converted into errors defined by this package.

When processing an errcode error, if you are looking for a particular
error then you can do something like:

```
import derr "github.com/docker/docker/errors"

...

err := someFunc()
if err.ErrorCode() == derr.ErrorCodeNoSuchContainer {
    ...
}
```
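To make that workflow concrete, here is a minimal sketch (not part of this commit) of defining and returning an error with the vendored errcode package shown above; the group name, error value, and the widget lookup are illustrative assumptions.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode"
)

// ErrorCodeNoSuchWidget is a hypothetical error definition in the style of
// builder.go/daemon.go below: a Value, a Message with a %s slot, a
// Description, and the HTTP status code that ServeJSON would use.
var ErrorCodeNoSuchWidget = errcode.Register("example", errcode.ErrorDescriptor{
	Value:          "NOSUCHWIDGET",
	Message:        "no such widget: %s",
	Description:    "The requested widget does not exist",
	HTTPStatusCode: http.StatusNotFound,
})

func lookup(name string) error {
	// WithArgs fills the %s in Message at call time, as the README notes.
	return ErrorCodeNoSuchWidget.WithArgs(name)
}

func main() {
	err := lookup("frobnicator")
	fmt.Println(err) // prints: nosuchwidget: no such widget: frobnicator

	// Matching on the code, in the spirit of the README snippet above.
	if e, ok := err.(errcode.Error); ok && e.ErrorCode() == ErrorCodeNoSuchWidget {
		fmt.Println("widget is missing; HTTP status would be",
			e.ErrorCode().Descriptor().HTTPStatusCode)
	}
}
```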
|
93 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/builder.go (new file, generated, vendored)
|
@ -0,0 +1,93 @@
|
|||
package errors
|
||||
|
||||
// This file contains all of the errors that can be generated from the
|
||||
// docker/builder component.
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrorCodeAtLeastOneArg is generated when the parser comes across a
|
||||
// Dockerfile command that doesn't have any args.
|
||||
ErrorCodeAtLeastOneArg = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "ATLEASTONEARG",
|
||||
Message: "%s requires at least one argument",
|
||||
Description: "The specified command requires at least one argument",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeExactlyOneArg is generated when the parser comes across a
|
||||
// Dockerfile command that requires exactly one arg but got less/more.
|
||||
ErrorCodeExactlyOneArg = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "EXACTLYONEARG",
|
||||
Message: "%s requires exactly one argument",
|
||||
Description: "The specified command requires exactly one argument",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeAtLeastTwoArgs is generated when the parser comes across a
|
||||
// Dockerfile command that requires at least two args but got less.
|
||||
ErrorCodeAtLeastTwoArgs = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "ATLEASTTWOARGS",
|
||||
Message: "%s requires at least two arguments",
|
||||
Description: "The specified command requires at least two arguments",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeTooManyArgs is generated when the parser comes across a
|
||||
// Dockerfile command that has more args than it should
|
||||
ErrorCodeTooManyArgs = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "TOOMANYARGS",
|
||||
Message: "Bad input to %s, too many args",
|
||||
Description: "The specified command was passed too many arguments",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeChainOnBuild is generated when the parser comes across a
|
||||
// Dockerfile command that is trying to chain ONBUILD commands.
|
||||
ErrorCodeChainOnBuild = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "CHAINONBUILD",
|
||||
Message: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed",
|
||||
Description: "ONBUILD Dockerfile commands aren't allowed on ONBUILD commands",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeBadOnBuildCmd is generated when the parser comes across a
|
||||
// an ONBUILD Dockerfile command with an invalid trigger/command.
|
||||
ErrorCodeBadOnBuildCmd = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "BADONBUILDCMD",
|
||||
Message: "%s isn't allowed as an ONBUILD trigger",
|
||||
Description: "The specified ONBUILD command isn't allowed",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeMissingFrom is generated when the Dockerfile is missing
|
||||
// a FROM command.
|
||||
ErrorCodeMissingFrom = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "MISSINGFROM",
|
||||
Message: "Please provide a source image with `from` prior to run",
|
||||
Description: "The Dockerfile is missing a FROM command",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeNotOnWindows is generated when the specified Dockerfile
|
||||
// command is not supported on Windows.
|
||||
ErrorCodeNotOnWindows = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NOTONWINDOWS",
|
||||
Message: "%s is not supported on Windows",
|
||||
Description: "The specified Dockerfile command is not supported on Windows",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeVolumeEmpty is generated when the specified Volume string
|
||||
// is empty.
|
||||
ErrorCodeVolumeEmpty = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUMEEMPTY",
|
||||
Message: "Volume specified can not be an empty string",
|
||||
Description: "The specified volume can not be an empty string",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
)
|
925 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/daemon.go (new file, generated, vendored)
|
@ -0,0 +1,925 @@
|
|||
package errors
|
||||
|
||||
// This file contains all of the errors that can be generated from the
|
||||
// docker/daemon component.
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrorCodeNoSuchContainer is generated when we look for a container by
|
||||
// name or ID and we can't find it.
|
||||
ErrorCodeNoSuchContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NOSUCHCONTAINER",
|
||||
Message: "no such id: %s",
|
||||
Description: "The specified container can not be found",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
})
|
||||
|
||||
// ErrorCodeUnregisteredContainer is generated when we try to load
|
||||
// a storage driver for an unregistered container
|
||||
ErrorCodeUnregisteredContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "UNREGISTEREDCONTAINER",
|
||||
Message: "Can't load storage driver for unregistered container %s",
|
||||
Description: "An attempt was made to load the storage driver for a container that is not registered with the daemon",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeContainerBeingRemoved is generated when an attempt to start
|
||||
// a container is made but it's in the process of being removed, or is dead.
|
||||
ErrorCodeContainerBeingRemoved = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "CONTAINERBEINGREMOVED",
|
||||
Message: "Container is marked for removal and cannot be started.",
|
||||
Description: "An attempt was made to start a container that is in the process of being deleted",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeUnpauseContainer is generated when we attempt to stop a
|
||||
// container but it's paused.
|
||||
ErrorCodeUnpauseContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "UNPAUSECONTAINER",
|
||||
Message: "Container %s is paused. Unpause the container before stopping",
|
||||
Description: "The specified container is paused, before it can be stopped it must be unpaused",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeAlreadyPaused is generated when we attempt to pause a
|
||||
// container when it's already paused.
|
||||
ErrorCodeAlreadyPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "ALREADYPAUSED",
|
||||
Message: "Container %s is already paused",
|
||||
Description: "The specified container is already in the paused state",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeNotPaused is generated when we attempt to unpause a
|
||||
// container when it's not paused.
|
||||
ErrorCodeNotPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NOTPAUSED",
|
||||
Message: "Container %s is not paused",
|
||||
Description: "The specified container can not be unpaused because it is not in a paused state",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeImageUnregContainer is generated when we attempt to get the
|
||||
// image of an unknown/unregistered container.
|
||||
ErrorCodeImageUnregContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "IMAGEUNREGCONTAINER",
|
||||
Message: "Can't get image of unregistered container",
|
||||
Description: "An attempt to retrieve the image of a container was made but the container is not registered",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeEmptyID is generated when an ID is the empty string.
|
||||
ErrorCodeEmptyID = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "EMPTYID",
|
||||
Message: "Invalid empty id",
|
||||
Description: "An attempt was made to register a container but the container's ID can not be an empty string",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeLoggingFactory is generated when we could not load the
|
||||
// log driver.
|
||||
ErrorCodeLoggingFactory = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "LOGGINGFACTORY",
|
||||
Message: "Failed to get logging factory: %v",
|
||||
Description: "There was an error while trying to get the logging driver factory",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeInitLogger is generated when we could not initialize
|
||||
// the logging driver.
|
||||
ErrorCodeInitLogger = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "INITLOGGER",
|
||||
Message: "Failed to initialize logging driver: %v",
|
||||
Description: "An error occurred while trying to initialize the logging driver",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeNotRunning is generated when we need to verify that
|
||||
// a container is running, but it's not.
|
||||
ErrorCodeNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NOTRUNNING",
|
||||
Message: "Container %s is not running",
|
||||
Description: "The specified action can not be taken due to the container not being in a running state",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeLinkNotRunning is generated when we try to link to a
|
||||
// container that is not running.
|
||||
ErrorCodeLinkNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "LINKNOTRUNNING",
|
||||
Message: "Cannot link to a non running container: %s AS %s",
|
||||
Description: "An attempt was made to link to a container but the container is not in a running state",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeDeviceInfo is generated when there is an error while trying
|
||||
// to get info about a custom device.
|
||||
// container that is not running.
|
||||
ErrorCodeDeviceInfo = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "DEVICEINFO",
|
||||
Message: "error gathering device information while adding custom device %q: %s",
|
||||
Description: "There was an error while trying to retrieve the information about a custom device",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeEmptyEndpoint is generated when the endpoint for a port
|
||||
// map is nil.
|
||||
ErrorCodeEmptyEndpoint = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "EMPTYENDPOINT",
|
||||
Message: "invalid endpoint while building port map info",
|
||||
Description: "The specified endpoint for the port mapping is empty",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeEmptyNetwork is generated when the networkSettings for a port
|
||||
// map is nil.
|
||||
ErrorCodeEmptyNetwork = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "EMPTYNETWORK",
|
||||
Message: "invalid networksettings while building port map info",
|
||||
Description: "The specified endpoint for the port mapping is empty",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeParsingPort is generated when there is an error parsing
|
||||
// a "port" string.
|
||||
ErrorCodeParsingPort = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "PARSINGPORT",
|
||||
Message: "Error parsing Port value(%v):%v",
|
||||
Description: "There was an error while trying to parse the specified 'port' value",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeNoSandbox is generated when we can't find the specified
|
||||
// sandbox(network) by ID.
|
||||
ErrorCodeNoSandbox = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NOSANDBOX",
|
||||
Message: "error locating sandbox id %s: %v",
|
||||
Description: "There was an error trying to locate the specified networking sandbox",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeNetworkUpdate is generated when there is an error while
|
||||
// trying update a network/sandbox config.
|
||||
ErrorCodeNetworkUpdate = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NETWORKUPDATE",
|
||||
Message: "Update network failed: %v",
|
||||
Description: "There was an error trying to update the configuration information of the specified network sandbox",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeNetworkRefresh is generated when there is an error while
|
||||
// trying refresh a network/sandbox config.
|
||||
ErrorCodeNetworkRefresh = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NETWORKREFRESH",
|
||||
Message: "Update network failed: Failure in refresh sandbox %s: %v",
|
||||
Description: "There was an error trying to refresh the configuration information of the specified network sandbox",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeHostPort is generated when there was an error while trying
|
||||
// to parse a "host/port" string.
|
||||
ErrorCodeHostPort = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "HOSTPORT",
|
||||
Message: "Error parsing HostPort value(%s):%v",
|
||||
Description: "There was an error trying to parse the specified 'HostPort' value",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeNetworkConflict is generated when we try to publish a service
|
||||
// in network mode.
|
||||
ErrorCodeNetworkConflict = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NETWORKCONFLICT",
|
||||
Message: "conflicting options: publishing a service and network mode",
|
||||
Description: "It is not possible to publish a service when it is in network mode",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
})
|
||||
|
||||
// ErrorCodeJoinInfo is generated when we failed to update a container's
|
||||
// join info.
|
||||
ErrorCodeJoinInfo = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "JOININFO",
|
||||
Message: "Updating join info failed: %v",
|
||||
Description: "There was an error during an attempt update a container's join information",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeIPCRunning is generated when we try to join a container's
|
||||
// IPC but it's not running.
|
||||
ErrorCodeIPCRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "IPCRUNNING",
|
||||
Message: "cannot join IPC of a non running container: %s",
|
||||
Description: "An attempt was made to join the IPC of a container, but the container is not running",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeNotADir is generated when we try to create a directory
|
||||
// but the path isn't a dir.
|
||||
ErrorCodeNotADir = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NOTADIR",
|
||||
Message: "Cannot mkdir: %s is not a directory",
|
||||
Description: "An attempt was made create a directory, but the location in which it is being created is not a directory",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeParseContainer is generated when the reference to a
|
||||
// container doesn't include a ":" (another container).
|
||||
ErrorCodeParseContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "PARSECONTAINER",
|
||||
Message: "no container specified to join network",
|
||||
Description: "The specified reference to a container is missing a ':' as a separator between 'container' and 'name'/'id'",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeJoinSelf is generated when we try to network to ourselves.
|
||||
ErrorCodeJoinSelf = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "JOINSELF",
|
||||
Message: "cannot join own network",
|
||||
Description: "An attempt was made to have a container join its own network",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeJoinRunning is generated when we try to join the network of a container that is not running.
|
||||
ErrorCodeJoinRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "JOINRUNNING",
|
||||
Message: "cannot join network of a non running container: %s",
|
||||
Description: "An attempt to join the network of a container, but that container isn't running",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeModeNotContainer is generated when we try to network to
|
||||
// another container but the mode isn't 'container'.
|
||||
ErrorCodeModeNotContainer = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "MODENOTCONTAINER",
|
||||
Message: "network mode not set to container",
|
||||
Description: "An attempt was made to connect to a container's network but the mode wasn't set to 'container'",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeRemovingVolume is generated when we try to remove a mount
|
||||
// point (volume) but fail.
|
||||
ErrorCodeRemovingVolume = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "REMOVINGVOLUME",
|
||||
Message: "Error removing volumes:\n%v",
|
||||
Description: "There was an error while trying to remove the mount point (volume) of a container",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeInvalidNetworkMode is generated when an invalid network
|
||||
// mode value is specified.
|
||||
ErrorCodeInvalidNetworkMode = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "INVALIDNETWORKMODE",
|
||||
Message: "invalid network mode: %s",
|
||||
Description: "The specified networking mode is not valid",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeGetGraph is generated when there was an error while
|
||||
// trying to find a graph/image.
|
||||
ErrorCodeGetGraph = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "GETGRAPH",
|
||||
Message: "Failed to graph.Get on ImageID %s - %s",
|
||||
Description: "There was an error trying to retrieve the image for the specified image ID",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeGetLayer is generated when there was an error while
|
||||
// trying to retrieve a particular layer of an image.
|
||||
ErrorCodeGetLayer = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "GETLAYER",
|
||||
Message: "Failed to get layer path from graphdriver %s for ImageID %s - %s",
|
||||
Description: "There was an error trying to retrieve the layer of the specified image",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodePutLayer is generated when there was an error while
|
||||
// trying to 'put' a particular layer of an image.
|
||||
ErrorCodePutLayer = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "PUTLAYER",
|
||||
Message: "Failed to put layer path from graphdriver %s for ImageID %s - %s",
|
||||
Description: "There was an error trying to store a layer for the specified image",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeGetLayerMetadata is generated when there was an error while
|
||||
// trying to retrieve the metadata of a layer of an image.
|
||||
ErrorCodeGetLayerMetadata = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "GETLAYERMETADATA",
|
||||
Message: "Failed to get layer metadata - %s",
|
||||
Description: "There was an error trying to retrieve the metadata of a layer for the specified image",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeEmptyConfig is generated when the input config data
|
||||
// is empty.
|
||||
ErrorCodeEmptyConfig = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "EMPTYCONFIG",
|
||||
Message: "Config cannot be empty in order to create a container",
|
||||
Description: "While trying to create a container, the specified configuration information was empty",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeNoSuchImageHash is generated when we can't find the
|
||||
// specified image by its hash
|
||||
ErrorCodeNoSuchImageHash = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NOSUCHIMAGEHASH",
|
||||
Message: "No such image: %s",
|
||||
Description: "An attempt was made to find an image by its hash, but the lookup failed",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
})
|
||||
|
||||
// ErrorCodeNoSuchImageTag is generated when we can't find the
|
||||
// specified image by its name/tag.
|
||||
ErrorCodeNoSuchImageTag = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NOSUCHIMAGETAG",
|
||||
Message: "No such image: %s:%s",
|
||||
Description: "An attempt was made to find an image by its name/tag, but the lookup failed",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
})
|
||||
|
||||
// ErrorCodeMountOverFile is generated when we try to mount a volume
|
||||
// over an existing file (but not a dir).
|
||||
ErrorCodeMountOverFile = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "MOUNTOVERFILE",
|
||||
Message: "cannot mount volume over existing file, file exists %s",
|
||||
Description: "An attempt was made to mount a volume at the same location as a pre-existing file",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeMountSetup is generated when we can't define a mount point
|
||||
// due to the source and destination being undefined.
|
||||
ErrorCodeMountSetup = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "MOUNTSETUP",
|
||||
Message: "Unable to setup mount point, neither source nor volume defined",
|
||||
Description: "An attempt was made to setup a mount point, but the source and destination are undefined",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeVolumeInvalidMode is generated when the mode of a volume/bind
|
||||
// mount is invalid.
|
||||
ErrorCodeVolumeInvalidMode = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUMEINVALIDMODE",
|
||||
Message: "invalid mode: %s",
|
||||
Description: "An invalid 'mode' was specified",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeVolumeInvalid is generated when the format of the
|
||||
// volume specification isn't valid.
|
||||
ErrorCodeVolumeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUMEINVALID",
|
||||
Message: "Invalid volume specification: %s",
|
||||
Description: "An invalid 'volume' was specified in the mount request",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeVolumeAbs is generated when path to a volume isn't absolute.
|
||||
ErrorCodeVolumeAbs = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUMEABS",
|
||||
Message: "Invalid volume destination path: %s mount path must be absolute.",
|
||||
Description: "An invalid 'destination' path was specified in the mount request, it must be an absolute path",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeVolumeName is generated when the name of a named volume isn't valid.
|
||||
ErrorCodeVolumeName = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUME_NAME_INVALID",
|
||||
Message: "%s includes invalid characters for a local volume name, only %s are allowed",
|
||||
Description: "The name of volume is invalid",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
})
|
||||
|
||||
// ErrorCodeVolumeSlash is generated when destination path to a volume is /
|
||||
ErrorCodeVolumeSlash = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUMESLASH",
|
||||
Message: "Invalid specification: destination can't be '/' in '%s'",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeVolumeDestIsC is generated when the destination is c: (Windows specific)
|
||||
ErrorCodeVolumeDestIsC = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUMEDESTISC",
|
||||
Message: "Destination drive letter in '%s' cannot be c:",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeVolumeDestIsCRoot is generated when the destination path is c:\ (Windows specific)
|
||||
ErrorCodeVolumeDestIsCRoot = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUMEDESTISCROOT",
|
||||
Message: `Destination path in '%s' cannot be c:\`,
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeVolumeSourceNotFound is generated when the source directory could not be found (Windows specific)
|
||||
ErrorCodeVolumeSourceNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUMESOURCENOTFOUND",
|
||||
Message: "Source directory '%s' could not be found: %v",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeVolumeSourceNotDirectory is generated when the source is not a directory (Windows specific)
|
||||
ErrorCodeVolumeSourceNotDirectory = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUMESOURCENOTDIRECTORY",
|
||||
Message: "Source '%s' is not a directory",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeVolumeFromBlank is generated when path to a volume is blank.
|
||||
ErrorCodeVolumeFromBlank = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUMEFROMBLANK",
|
||||
Message: "malformed volumes-from specification: %s",
|
||||
Description: "An invalid 'destination' path was specified in the mount request, it must not be blank",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeVolumeDup is generated when we try to mount two volumes
|
||||
// to the same path.
|
||||
ErrorCodeVolumeDup = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUMEDUP",
|
||||
Message: "Duplicate bind mount %s",
|
||||
Description: "An attempt was made to mount a volume but the specified destination location is already used in a previous mount",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeVolumeNoSourceForMount is generated when no source directory
|
||||
// for a volume mount was found. (Windows specific)
|
||||
ErrorCodeVolumeNoSourceForMount = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUMENOSOURCEFORMOUNT",
|
||||
Message: "No source for mount name %q driver %q destination %s",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeVolumeNameReservedWord is generated when the name in a volume
|
||||
// uses a reserved word for filenames. (Windows specific)
|
||||
ErrorCodeVolumeNameReservedWord = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUMENAMERESERVEDWORD",
|
||||
Message: "Volume name %q cannot be a reserved word for Windows filenames",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeCantUnpause is generated when there's an error while trying
|
||||
// to unpause a container.
|
||||
ErrorCodeCantUnpause = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "CANTUNPAUSE",
|
||||
Message: "Cannot unpause container %s: %s",
|
||||
Description: "An error occurred while trying to unpause the specified container",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodePSError is generated when there is an error trying to run 'ps'.
|
||||
ErrorCodePSError = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "PSError",
|
||||
Message: "Error running ps: %s",
|
||||
Description: "There was an error trying to run the 'ps' command in the specified container",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeNoPID is generated when the PID field can't be found in the
|
||||
// ps output.
|
||||
ErrorCodeNoPID = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NOPID",
|
||||
Message: "Couldn't find PID field in ps output",
|
||||
Description: "There was no 'PID' field in the output of the 'ps' command that was executed",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeBadPID is generated when we can't convert a PID to an int.
|
||||
ErrorCodeBadPID = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "BADPID",
|
||||
Message: "Unexpected pid '%s': %s",
|
||||
Description: "While trying to parse the output of the 'ps' command, the 'PID' field was not an integer",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeNoTop is generated when we try to run 'top' but can't
|
||||
// because we're on windows.
|
||||
ErrorCodeNoTop = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NOTOP",
|
||||
Message: "Top is not supported on Windows",
|
||||
Description: "The 'top' command is not supported on Windows",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeStopped is generated when we try to stop a container
|
||||
// that is already stopped.
|
||||
ErrorCodeStopped = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "STOPPED",
|
||||
Message: "Container already stopped",
|
||||
Description: "An attempt was made to stop a container, but the container is already stopped",
|
||||
HTTPStatusCode: http.StatusNotModified,
|
||||
})
|
||||
|
||||
// ErrorCodeCantStop is generated when we try to stop a container
|
||||
// but failed for some reason.
|
||||
ErrorCodeCantStop = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "CANTSTOP",
|
||||
Message: "Cannot stop container %s: %s\n",
|
||||
Description: "An error occurred while tring to stop the specified container",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeBadCPUFields is generated when the number of CPU fields is
|
||||
// less than 8.
|
||||
ErrorCodeBadCPUFields = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "BADCPUFIELDS",
|
||||
Message: "invalid number of cpu fields",
|
||||
Description: "While reading the '/proc/stat' file, the number of 'cpu' fields is less than 8",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeBadCPUInt is generated when the CPU field can't be parsed as an int.
|
||||
ErrorCodeBadCPUInt = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "BADCPUINT",
|
||||
Message: "Unable to convert value %s to int: %s",
|
||||
Description: "While reading the '/proc/stat' file, the 'CPU' field could not be parsed as an integer",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeBadStatFormat is generated when the output of the stat info
|
||||
// isn't parseable.
|
||||
ErrorCodeBadStatFormat = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "BADSTATFORMAT",
|
||||
Message: "invalid stat format",
|
||||
Description: "There was an error trying to parse the '/proc/stat' file",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeTimedOut is generated when a timer expires.
|
||||
ErrorCodeTimedOut = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "TIMEDOUT",
|
||||
Message: "Timed out: %v",
|
||||
Description: "A timer expired",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeAlreadyRemoving is generated when we try to remove a
|
||||
// container that is already being removed.
|
||||
ErrorCodeAlreadyRemoving = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "ALREADYREMOVING",
|
||||
Message: "Status is already RemovalInProgress",
|
||||
Description: "An attempt to remove a container was made, but the container is already in the process of being removed",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeStartPaused is generated when we start a paused container.
|
||||
ErrorCodeStartPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "STARTPAUSED",
|
||||
Message: "Cannot start a paused container, try unpause instead.",
|
||||
Description: "An attempt to start a container was made, but the container is paused. Unpause it first",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeAlreadyStarted is generated when we try to start a container
|
||||
// that is already running.
|
||||
ErrorCodeAlreadyStarted = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "ALREADYSTARTED",
|
||||
Message: "Container already started",
|
||||
Description: "An attempt to start a container was made, but the container is already started",
|
||||
HTTPStatusCode: http.StatusNotModified,
|
||||
})
|
||||
|
||||
// ErrorCodeHostConfigStart is generated when a HostConfig is passed
|
||||
// into the start command.
|
||||
ErrorCodeHostConfigStart = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "HOSTCONFIGSTART",
|
||||
Message: "Supplying a hostconfig on start is not supported. It should be supplied on create",
|
||||
Description: "The 'start' command does not accept 'HostConfig' data, try using the 'create' command instead",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeCantStart is generated when an error occurred while
|
||||
// trying to start a container.
|
||||
ErrorCodeCantStart = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "CANTSTART",
|
||||
Message: "Cannot start container %s: %s",
|
||||
Description: "There was an error while trying to start a container",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeCantRestart is generated when an error occurred while
|
||||
// trying to restart a container.
|
||||
ErrorCodeCantRestart = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "CANTRESTART",
|
||||
Message: "Cannot restart container %s: %s",
|
||||
Description: "There was an error while trying to restart a container",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeEmptyRename is generated when one of the names on a
|
||||
// rename is empty.
|
||||
ErrorCodeEmptyRename = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "EMPTYRENAME",
|
||||
Message: "Neither old nor new names may be empty",
|
||||
Description: "An attempt was made to rename a container but either the old or new names were blank",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeRenameTaken is generated when we try to rename but the
|
||||
// new name isn't available.
|
||||
ErrorCodeRenameTaken = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "RENAMETAKEN",
|
||||
Message: "Error when allocating new name: %s",
|
||||
Description: "The new name specified on the 'rename' command is already being used",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeRenameDelete is generated when we try to rename but
|
||||
// failed trying to delete the old container.
|
||||
ErrorCodeRenameDelete = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "RENAMEDELETE",
|
||||
Message: "Failed to delete container %q: %v",
|
||||
Description: "There was an error trying to delete the specified container",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodePauseError is generated when we try to pause a container
|
||||
// but failed.
|
||||
ErrorCodePauseError = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "PAUSEERROR",
|
||||
Message: "Cannot pause container %s: %s",
|
||||
Description: "There was an error trying to pause the specified container",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeNeedStream is generated when we try to stream a container's
|
||||
// logs but no output stream was specified.
|
||||
ErrorCodeNeedStream = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NEEDSTREAM",
|
||||
Message: "You must choose at least one stream",
|
||||
Description: "While trying to stream a container's logs, no output stream was specified",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeDanglingOne is generated when we try to specify more than one
|
||||
// 'dangling' specifier.
|
||||
ErrorCodeDanglingOne = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "DANLGINGONE",
|
||||
Message: "Conflict: cannot use more than 1 value for `dangling` filter",
|
||||
Description: "The specified 'dangling' filter may not have more than one value",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
})
|
||||
|
||||
// ErrorCodeImgDelUsed is generated when we try to delete an image
|
||||
// but it is being used.
|
||||
ErrorCodeImgDelUsed = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "IMGDELUSED",
|
||||
Message: "conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s",
|
||||
Description: "An attempt was made to delete an image but it is currently being used",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
})
|
||||
|
||||
// ErrorCodeImgNoParent is generated when we try to find an image's
|
||||
// parent but it's not in the graph.
|
||||
ErrorCodeImgNoParent = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "IMGNOPARENT",
|
||||
Message: "unable to get parent image: %v",
|
||||
Description: "There was an error trying to find an image's parent, it was not in the graph",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeExportFailed is generated when an export fails.
|
||||
ErrorCodeExportFailed = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "EXPORTFAILED",
|
||||
Message: "%s: %s",
|
||||
Description: "There was an error during an export operation",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeExecResize is generated when we try to resize an exec
|
||||
// but it's not running.
|
||||
ErrorCodeExecResize = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "EXECRESIZE",
|
||||
Message: "Exec %s is not running, so it can not be resized.",
|
||||
Description: "An attempt was made to resize an 'exec', but the 'exec' is not running",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeContainerNotRunning is generated when we try to get the info
|
||||
// on an exec but the container is not running.
|
||||
ErrorCodeContainerNotRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "CONTAINERNOTRUNNING",
|
||||
Message: "Container %s is not running: %s",
|
||||
Description: "An attempt was made to retrieve the information about an 'exec' but the container is not running",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
})
|
||||
|
||||
// ErrorCodeNoExecID is generated when we try to get the info
|
||||
// on an exec but it can't be found.
|
||||
ErrorCodeNoExecID = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NOEXECID",
|
||||
Message: "No such exec instance '%s' found in daemon",
|
||||
Description: "The specified 'exec' instance could not be found",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
})
|
||||
|
||||
// ErrorCodeExecPaused is generated when we try to start an exec
|
||||
// but the container is paused.
|
||||
ErrorCodeExecPaused = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "EXECPAUSED",
|
||||
Message: "Container %s is paused, unpause the container before exec",
|
||||
Description: "An attempt to start an 'exec' was made, but the owning container is paused",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
})
|
||||
|
||||
// ErrorCodeExecRunning is generated when we try to start an exec
|
||||
// but it's already running.
|
||||
ErrorCodeExecRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "EXECRUNNING",
|
||||
Message: "Error: Exec command %s is already running",
|
||||
Description: "An attempt to start an 'exec' was made, but 'exec' is already running",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeExecCantRun is generated when we try to start an exec
|
||||
// but it failed for some reason.
|
||||
ErrorCodeExecCantRun = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "EXECCANTRUN",
|
||||
Message: "Cannot run exec command %s in container %s: %s",
|
||||
Description: "An attempt to start an 'exec' was made, but an error occurred",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeExecAttach is generated when we try to attach to an exec
|
||||
// but failed.
|
||||
ErrorCodeExecAttach = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "EXECATTACH",
|
||||
Message: "attach failed with error: %s",
|
||||
Description: "There was an error while trying to attach to an 'exec'",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeExecContainerStopped is generated when we try to start
|
||||
// an exec but then the container stopped.
|
||||
ErrorCodeExecContainerStopped = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "EXECCONTAINERSTOPPED",
|
||||
Message: "container stopped while running exec",
|
||||
Description: "An attempt was made to start an 'exec' but the owning container is in the 'stopped' state",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeDefaultName is generated when we try to delete the
|
||||
// default name of a container.
|
||||
ErrorCodeDefaultName = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "DEFAULTNAME",
|
||||
Message: "Conflict, cannot remove the default name of the container",
|
||||
Description: "An attempt to delete the default name of a container was made, but that is not allowed",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
})
|
||||
|
||||
// ErrorCodeNoParent is generated when we try to delete a container
|
||||
// but we can't find its parent image.
|
||||
ErrorCodeNoParent = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NOPARENT",
|
||||
Message: "Cannot get parent %s for name %s",
|
||||
Description: "An attempt was made to delete a container but its parent image could not be found",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeCantDestroy is generated when we try to delete a container
|
||||
// but failed for some reason.
|
||||
ErrorCodeCantDestroy = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "CANTDESTROY",
|
||||
Message: "Cannot destroy container %s: %v",
|
||||
Description: "An attempt was made to delete a container but it failed",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeRmRunning is generated when we try to delete a container
|
||||
// but it's still running.
|
||||
ErrorCodeRmRunning = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "RMRUNNING",
|
||||
Message: "Conflict, You cannot remove a running container. Stop the container before attempting removal or use -f",
|
||||
Description: "An attempt was made to delete a container but the container is still running, try to either stop it first or use '-f'",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
})
|
||||
|
||||
// ErrorCodeRmFailed is generated when we try to delete a container
|
||||
// but it failed for some reason.
|
||||
ErrorCodeRmFailed = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "RMFAILED",
|
||||
Message: "Could not kill running container, cannot remove - %v",
|
||||
Description: "An error occurred while trying to delete a running container",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeRmNotFound is generated when we try to delete a container
|
||||
// but couldn't find it.
|
||||
ErrorCodeRmNotFound = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "RMNOTFOUND",
|
||||
Message: "Could not kill running container, cannot remove - %v",
|
||||
Description: "An attempt to delete a container was made but the container could not be found",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeRmState is generated when we try to delete a container
|
||||
// but couldn't set its state to RemovalInProgress.
|
||||
ErrorCodeRmState = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "RMSTATE",
|
||||
Message: "Failed to set container state to RemovalInProgress: %s",
|
||||
Description: "An attempt to delete a container was made, but there as an error trying to set its state to 'RemovalInProgress'",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeRmDriverFS is generated when we try to delete a container
|
||||
// but the driver failed to delete its filesystem.
|
||||
ErrorCodeRmDriverFS = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "RMDRIVERFS",
|
||||
Message: "Driver %s failed to remove root filesystem %s: %s",
|
||||
Description: "While trying to delete a container, the driver failed to remove the root filesystem",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeRmInit is generated when we try to delete a container
|
||||
// but failed deleting its init filesystem.
|
||||
ErrorCodeRmInit = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "RMINIT",
|
||||
Message: "Driver %s failed to remove init filesystem %s: %s",
|
||||
Description: "While trying to delete a container, the driver failed to remove the init filesystem",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeRmFS is generated when we try to delete a container
|
||||
// but failed deleting its filesystem.
|
||||
ErrorCodeRmFS = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "RMFS",
|
||||
Message: "Unable to remove filesystem for %v: %v",
|
||||
Description: "While trying to delete a container, the driver failed to remove the filesystem",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeRmExecDriver is generated when we try to delete a container
|
||||
// but failed deleting its exec driver data.
|
||||
ErrorCodeRmExecDriver = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "RMEXECDRIVER",
|
||||
Message: "Unable to remove execdriver data for %s: %s",
|
||||
Description: "While trying to delete a container, there was an error trying to remove th exec driver data",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeRmVolumeInUse is generated when we try to delete a container
|
||||
// but failed deleting a volume because it's being used.
|
||||
ErrorCodeRmVolumeInUse = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "RMVOLUMEINUSE",
|
||||
Message: "Conflict: %v",
|
||||
Description: "While trying to delete a container, one of its volumes is still being used",
|
||||
HTTPStatusCode: http.StatusConflict,
|
||||
})
|
||||
|
||||
// ErrorCodeRmVolume is generated when we try to delete a container
|
||||
// but failed deleting a volume.
|
||||
ErrorCodeRmVolume = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "RMVOLUME",
|
||||
Message: "Error while removing volume %s: %v",
|
||||
Description: "While trying to delete a container, there was an error trying to delete one of its volumes",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeInvalidCpusetCpus is generated when user provided cpuset CPUs
|
||||
// are invalid.
|
||||
ErrorCodeInvalidCpusetCpus = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "INVALIDCPUSETCPUS",
|
||||
Message: "Invalid value %s for cpuset cpus.",
|
||||
Description: "While verifying the container's 'HostConfig', CpusetCpus value was in an incorrect format",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeInvalidCpusetMems is generated when user provided cpuset mems
|
||||
// are invalid.
|
||||
ErrorCodeInvalidCpusetMems = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "INVALIDCPUSETMEMS",
|
||||
Message: "Invalid value %s for cpuset mems.",
|
||||
Description: "While verifying the container's 'HostConfig', CpusetMems value was in an incorrect format",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeNotAvailableCpusetCpus is generated when user provided cpuset
|
||||
// CPUs aren't available in the container's cgroup.
|
||||
ErrorCodeNotAvailableCpusetCpus = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NOTAVAILABLECPUSETCPUS",
|
||||
Message: "Requested CPUs are not available - requested %s, available: %s.",
|
||||
Description: "While verifying the container's 'HostConfig', cpuset CPUs provided aren't available in the container's cgroup available set",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorCodeNotAvailableCpusetMems is generated when user provided cpuset
|
||||
// memory nodes aren't available in the container's cgroup.
|
||||
ErrorCodeNotAvailableCpusetMems = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NOTAVAILABLECPUSETMEMS",
|
||||
Message: "Requested memory nodes are not available - requested %s, available: %s.",
|
||||
Description: "While verifying the container's 'HostConfig', cpuset memory nodes provided aren't available in the container's cgroup available set",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
|
||||
// ErrorVolumeNameTaken is generated when an error occurred while
|
||||
// trying to create a volume that already exists with a different driver.
|
||||
ErrorVolumeNameTaken = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "VOLUME_NAME_TAKEN",
|
||||
Message: "A volume name %s already exists with the %s driver. Choose a different volume name.",
|
||||
Description: "An attempt to create a volume using a driver but the volume already exists with a different driver",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
)
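Each registration above follows the same pattern: `errcode.Register` binds a value, a printf-style message, a description and an HTTP status code to a new error code in the "daemon" group. The sketch below shows roughly how such a code is consumed; it uses the upstream `github.com/docker/distribution/registry/api/errcode` package that the vendored path mirrors, and an invented "example" group and value, so it is illustrative rather than part of this commit.

```go
// Sketch only: invented "example" group/value; the import path is the
// upstream home of the errcode package vendored in this commit.
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

// Register mirrors what daemon.go does for every ErrorCode* variable above:
// it attaches a value, a printf-style message, a description and an HTTP
// status code to a newly allocated error code.
var errExampleStopped = errcode.Register("example", errcode.ErrorDescriptor{
	Value:          "EXAMPLESTOPPED",
	Message:        "Container %s is already stopped",
	Description:    "An attempt was made to stop a container that is already stopped",
	HTTPStatusCode: http.StatusNotModified,
})

func main() {
	// WithArgs fills the message verbs; the resulting error still carries
	// the code it was built from.
	err := errExampleStopped.WithArgs("web")
	fmt.Println(err)

	// The descriptor, including the HTTP status, stays attached to the code.
	fmt.Println(errExampleStopped.Descriptor().HTTPStatusCode) // 304
}
```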
|
6
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/error.go
generated
vendored
Normal file
|
@@ -0,0 +1,6 @@
|
|||
package errors
|
||||
|
||||
// This file contains all of the errors that can be generated from the
|
||||
// docker engine but are not tied to any specific top-level component.
|
||||
|
||||
const errGroup = "engine"
|
20
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/image.go
generated
vendored
Normal file
|
@@ -0,0 +1,20 @@
|
|||
package errors
|
||||
|
||||
// This file contains all of the errors that can be generated from the
|
||||
// docker/image component.
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrorCodeInvalidImageID is generated when the specified image ID is incorrectly formatted.
|
||||
ErrorCodeInvalidImageID = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "INVALIDIMAGEID",
|
||||
Message: "image ID '%s' is invalid ",
|
||||
Description: "The specified image id is incorrectly formatted",
|
||||
HTTPStatusCode: http.StatusInternalServerError,
|
||||
})
|
||||
)
|
36
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/errors/server.go
generated
vendored
Normal file
|
@@ -0,0 +1,36 @@
|
|||
package errors
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/distribution/registry/api/errcode"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrorCodeNewerClientVersion is generated when a request from a client
|
||||
// specifies a higher version than the server supports.
|
||||
ErrorCodeNewerClientVersion = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NEWERCLIENTVERSION",
|
||||
Message: "client is newer than server (client API version: %s, server API version: %s)",
|
||||
Description: "The client version is higher than the server version",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
})
|
||||
|
||||
// ErrorCodeOldClientVersion is generated when a request from a client
|
||||
// specifies a version lower than the minimum version supported by the server.
|
||||
ErrorCodeOldClientVersion = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "OLDCLIENTVERSION",
|
||||
Message: "client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version",
|
||||
Description: "The client version is too old for the server",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
})
|
||||
|
||||
// ErrorNetworkControllerNotEnabled is generated when the networking stack is not enabled
|
||||
// for certain platforms, like windows.
|
||||
ErrorNetworkControllerNotEnabled = errcode.Register(errGroup, errcode.ErrorDescriptor{
|
||||
Value: "NETWORK_CONTROLLER_NOT_ENABLED",
|
||||
Message: "the network controller is not enabled for this platform",
|
||||
Description: "Docker's networking stack is disabled for this platform",
|
||||
HTTPStatusCode: http.StatusNotFound,
|
||||
})
|
||||
)
|
|
@@ -4,18 +4,22 @@ import (
|
|||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// EnvironmentVariableRegexp A regexp to validate correct environment variables
|
||||
// Environment variables set by the user must have a name consisting solely of
|
||||
// alphabetics, numerics, and underscores - the first of which must not be numeric.
|
||||
EnvironmentVariableRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$")
|
||||
)
|
||||
|
||||
// ParseEnvFile Read in a line delimited file with environment variables enumerated
|
||||
// ParseEnvFile reads a file with environment variables enumerated by lines
|
||||
//
|
||||
// ``Environment variable names used by the utilities in the Shell and
|
||||
// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase
|
||||
// letters, digits, and the '_' (underscore) from the characters defined in
|
||||
// Portable Character Set and do not begin with a digit. *But*, other
|
||||
// characters may be permitted by an implementation; applications shall
|
||||
// tolerate the presence of such names.''
|
||||
// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html
|
||||
//
|
||||
// As of #16585, it's up to the application inside docker to validate
|
||||
// environment variables or not; that's why we just strip leading whitespace and
|
||||
// nothing more.
|
||||
func ParseEnvFile(filename string) ([]string, error) {
|
||||
fh, err := os.Open(filename)
|
||||
if err != nil {
|
||||
|
@@ -26,17 +30,18 @@ func ParseEnvFile(filename string) ([]string, error) {
|
|||
lines := []string{}
|
||||
scanner := bufio.NewScanner(fh)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
// trim the line from all leading whitespace first
|
||||
line := strings.TrimLeft(scanner.Text(), whiteSpaces)
|
||||
// line is not empty, and not starting with '#'
|
||||
if len(line) > 0 && !strings.HasPrefix(line, "#") {
|
||||
data := strings.SplitN(line, "=", 2)
|
||||
|
||||
// trim the front of a variable, but nothing else
|
||||
variable := strings.TrimLeft(data[0], whiteSpaces)
|
||||
|
||||
if !EnvironmentVariableRegexp.MatchString(variable) {
|
||||
return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", variable)}
|
||||
if strings.ContainsAny(variable, whiteSpaces) {
|
||||
return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)}
|
||||
}
|
||||
|
||||
if len(data) > 1 {
|
||||
|
||||
// pass the value through, no trimming
|
||||
|
|
|
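The two hunks above change ParseEnvFile's validation: instead of matching names against EnvironmentVariableRegexp, the new code only strips leading whitespace and rejects variable names that still contain whitespace. The snippet below is a minimal, self-contained re-implementation of just that rule for illustration; `whiteSpaces` and `parseEnvLines` are assumptions here, not the vendored function.

```go
// Minimal sketch of the relaxed validation rule shown in the hunk above.
package main

import (
	"fmt"
	"strings"
)

const whiteSpaces = " \t"

func parseEnvLines(input string) ([]string, error) {
	var out []string
	for _, raw := range strings.Split(input, "\n") {
		// Only leading whitespace is stripped; the value is left untouched.
		line := strings.TrimLeft(raw, whiteSpaces)
		if len(line) == 0 || strings.HasPrefix(line, "#") {
			continue
		}
		variable := strings.SplitN(line, "=", 2)[0]
		// The old regexp check is gone; only embedded whitespace is rejected.
		if strings.ContainsAny(variable, whiteSpaces) {
			return nil, fmt.Errorf("variable '%s' has white spaces", variable)
		}
		out = append(out, line)
	}
	return out, nil
}

func main() {
	lines, err := parseEnvLines("  FOO=bar\n# comment\nFOO.BAR=ok\n")
	fmt.Println(lines, err) // [FOO=bar FOO.BAR=ok] <nil>

	_, err = parseEnvLines("BAD NAME=1\n")
	fmt.Println(err) // variable 'BAD NAME' has white spaces
}
```

In other words, a name such as FOO.BAR, which the old regexp rejected, now passes through untouched; only embedded whitespace in the name is treated as an error.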
@@ -1,133 +0,0 @@
|
|||
package opts
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func tmpFileWithContent(content string, t *testing.T) string {
|
||||
tmpFile, err := ioutil.TempFile("", "envfile-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer tmpFile.Close()
|
||||
|
||||
tmpFile.WriteString(content)
|
||||
return tmpFile.Name()
|
||||
}
|
||||
|
||||
// Test ParseEnvFile for a file with a few well formatted lines
|
||||
func TestParseEnvFileGoodFile(t *testing.T) {
|
||||
content := `foo=bar
|
||||
baz=quux
|
||||
# comment
|
||||
|
||||
_foobar=foobaz
|
||||
`
|
||||
|
||||
tmpFile := tmpFileWithContent(content, t)
|
||||
defer os.Remove(tmpFile)
|
||||
|
||||
lines, err := ParseEnvFile(tmpFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedLines := []string{
|
||||
"foo=bar",
|
||||
"baz=quux",
|
||||
"_foobar=foobaz",
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(lines, expectedLines) {
|
||||
t.Fatal("lines not equal to expected_lines")
|
||||
}
|
||||
}
|
||||
|
||||
// Test ParseEnvFile for an empty file
|
||||
func TestParseEnvFileEmptyFile(t *testing.T) {
|
||||
tmpFile := tmpFileWithContent("", t)
|
||||
defer os.Remove(tmpFile)
|
||||
|
||||
lines, err := ParseEnvFile(tmpFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(lines) != 0 {
|
||||
t.Fatal("lines not empty; expected empty")
|
||||
}
|
||||
}
|
||||
|
||||
// Test ParseEnvFile for a non existent file
|
||||
func TestParseEnvFileNonExistentFile(t *testing.T) {
|
||||
_, err := ParseEnvFile("foo_bar_baz")
|
||||
if err == nil {
|
||||
t.Fatal("ParseEnvFile succeeded; expected failure")
|
||||
}
|
||||
if _, ok := err.(*os.PathError); !ok {
|
||||
t.Fatalf("Expected a PathError, got [%v]", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test ParseEnvFile for a badly formatted file
|
||||
func TestParseEnvFileBadlyFormattedFile(t *testing.T) {
|
||||
content := `foo=bar
|
||||
f =quux
|
||||
`
|
||||
|
||||
tmpFile := tmpFileWithContent(content, t)
|
||||
defer os.Remove(tmpFile)
|
||||
|
||||
_, err := ParseEnvFile(tmpFile)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected a ErrBadEnvVariable, got nothing")
|
||||
}
|
||||
if _, ok := err.(ErrBadEnvVariable); !ok {
|
||||
t.Fatalf("Expected a ErrBadEnvVariable, got [%v]", err)
|
||||
}
|
||||
expectedMessage := "poorly formatted environment: variable 'f ' is not a valid environment variable"
|
||||
if err.Error() != expectedMessage {
|
||||
t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// Test ParseEnvFile for a file with a line exeeding bufio.MaxScanTokenSize
|
||||
func TestParseEnvFileLineTooLongFile(t *testing.T) {
|
||||
content := strings.Repeat("a", bufio.MaxScanTokenSize+42)
|
||||
content = fmt.Sprint("foo=", content)
|
||||
|
||||
tmpFile := tmpFileWithContent(content, t)
|
||||
defer os.Remove(tmpFile)
|
||||
|
||||
_, err := ParseEnvFile(tmpFile)
|
||||
if err == nil {
|
||||
t.Fatal("ParseEnvFile succeeded; expected failure")
|
||||
}
|
||||
}
|
||||
|
||||
// ParseEnvFile with a random file, pass through
|
||||
func TestParseEnvFileRandomFile(t *testing.T) {
|
||||
content := `first line
|
||||
another invalid line`
|
||||
tmpFile := tmpFileWithContent(content, t)
|
||||
defer os.Remove(tmpFile)
|
||||
|
||||
_, err := ParseEnvFile(tmpFile)
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("Expected a ErrBadEnvVariable, got nothing")
|
||||
}
|
||||
if _, ok := err.(ErrBadEnvVariable); !ok {
|
||||
t.Fatalf("Expected a ErrBadEnvvariable, got [%v]", err)
|
||||
}
|
||||
expectedMessage := "poorly formatted environment: variable 'first line' is not a valid environment variable"
|
||||
if err.Error() != expectedMessage {
|
||||
t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error())
|
||||
}
|
||||
}
|
|
@@ -4,4 +4,5 @@ package opts
|
|||
|
||||
import "fmt"
|
||||
|
||||
// DefaultHost constant defines the default host string used by docker on other hosts than Windows
|
||||
var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
|
||||
|
|
|
@@ -2,6 +2,5 @@
|
|||
|
||||
package opts
|
||||
|
||||
import "fmt"
|
||||
|
||||
var DefaultHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
|
||||
// DefaultHost constant defines the default host string used by docker on Windows
|
||||
var DefaultHost = DefaultTCPHost
|
||||
|
|
|
@@ -5,20 +5,25 @@ import (
|
|||
"net"
|
||||
)
|
||||
|
||||
// IpOpt type that hold an IP
|
||||
type IpOpt struct {
|
||||
// IPOpt holds an IP. It is used to store values from CLI flags.
|
||||
type IPOpt struct {
|
||||
*net.IP
|
||||
}
|
||||
|
||||
func NewIpOpt(ref *net.IP, defaultVal string) *IpOpt {
|
||||
o := &IpOpt{
|
||||
// NewIPOpt creates a new IPOpt from a reference net.IP and a
|
||||
// string representation of an IP. If the string is not a valid
|
||||
// IP it will fallback to the specified reference.
|
||||
func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
|
||||
o := &IPOpt{
|
||||
IP: ref,
|
||||
}
|
||||
o.Set(defaultVal)
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *IpOpt) Set(val string) error {
|
||||
// Set sets an IPv4 or IPv6 address from a given string. If the given
|
||||
// string is not parsable as an IP address it returns an error.
|
||||
func (o *IPOpt) Set(val string) error {
|
||||
ip := net.ParseIP(val)
|
||||
if ip == nil {
|
||||
return fmt.Errorf("%s is not an ip address", val)
|
||||
|
@@ -27,7 +32,9 @@ func (o *IpOpt) Set(val string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (o *IpOpt) String() string {
|
||||
// String returns the IP address stored in the IPOpt. If stored IP is a
|
||||
// nil pointer, it returns an empty string.
|
||||
func (o *IPOpt) String() string {
|
||||
if *o.IP == nil {
|
||||
return ""
|
||||
}
|
||||
|
|
|
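The rename from IpOpt/NewIpOpt to IPOpt/NewIPOpt keeps the type usable as a flag.Value: Set rejects strings that net.ParseIP cannot parse, which is exactly how NewIPOpt falls back to the reference IP when the default string is invalid. A hedged usage sketch follows; the plain github.com/docker/docker/opts import path is an assumption for illustration, since in this commit the package lives under the vendored go-dockerclient tree.

```go
// Hedged usage sketch of the renamed IPOpt; import path assumed.
package main

import (
	"flag"
	"fmt"
	"net"

	"github.com/docker/docker/opts"
)

func main() {
	defaultIP := net.IPv4(127, 0, 0, 1)

	// "not an ip" fails Set inside NewIPOpt, so the option keeps defaultIP.
	ipVar := opts.NewIPOpt(&defaultIP, "not an ip")

	// IPOpt implements flag.Value via Set and String, so it plugs straight
	// into a flag set.
	flag.Var(ipVar, "ip", "IP address to bind to")
	flag.Parse()

	fmt.Println(ipVar.String()) // 127.0.0.1 unless -ip was supplied
}
```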
@@ -1,54 +0,0 @@
|
|||
package opts
|
||||
|
||||
import (
|
||||
"net"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIpOptString(t *testing.T) {
|
||||
addresses := []string{"", "0.0.0.0"}
|
||||
var ip net.IP
|
||||
|
||||
for _, address := range addresses {
|
||||
stringAddress := NewIpOpt(&ip, address).String()
|
||||
if stringAddress != address {
|
||||
t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewIpOptInvalidDefaultVal(t *testing.T) {
|
||||
ip := net.IPv4(127, 0, 0, 1)
|
||||
defaultVal := "Not an ip"
|
||||
|
||||
ipOpt := NewIpOpt(&ip, defaultVal)
|
||||
|
||||
expected := "127.0.0.1"
|
||||
if ipOpt.String() != expected {
|
||||
t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewIpOptValidDefaultVal(t *testing.T) {
|
||||
ip := net.IPv4(127, 0, 0, 1)
|
||||
defaultVal := "192.168.1.1"
|
||||
|
||||
ipOpt := NewIpOpt(&ip, defaultVal)
|
||||
|
||||
expected := "192.168.1.1"
|
||||
if ipOpt.String() != expected {
|
||||
t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestIpOptSetInvalidVal(t *testing.T) {
|
||||
ip := net.IPv4(127, 0, 0, 1)
|
||||
ipOpt := &IpOpt{IP: &ip}
|
||||
|
||||
invalidIp := "invalid ip"
|
||||
expectedError := "invalid ip is not an ip address"
|
||||
err := ipOpt.Set(invalidIp)
|
||||
if err == nil || err.Error() != expectedError {
|
||||
t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error())
|
||||
}
|
||||
}
|
|
@@ -9,36 +9,45 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume"
|
||||
)
|
||||
|
||||
var (
|
||||
alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
|
||||
domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
|
||||
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker -d -H tcp://:8080
|
||||
DefaultHTTPHost = "127.0.0.1"
|
||||
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker -d -H tcp://
|
||||
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
|
||||
DefaultHTTPHost = "localhost"
|
||||
|
||||
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
|
||||
// TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
|
||||
// is not supplied. A better longer term solution would be to use a named
|
||||
// pipe as the default on the Windows daemon.
|
||||
// These are the IANA registered port numbers for use with Docker
|
||||
// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
|
||||
DefaultHTTPPort = 2375 // Default HTTP Port
|
||||
// DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
|
||||
DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
|
||||
// DefaultUnixSocket Path for the unix socket.
|
||||
// Docker daemon by default always listens on the default unix socket
|
||||
DefaultUnixSocket = "/var/run/docker.sock"
|
||||
// DefaultTCPHost constant defines the default host string used by docker on Windows
|
||||
DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
|
||||
// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
|
||||
DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
|
||||
)
|
||||
|
||||
// ListOpts type that hold a list of values and a validation function.
|
||||
// ListOpts holds a list of values and a validation function.
|
||||
type ListOpts struct {
|
||||
values *[]string
|
||||
validator ValidatorFctType
|
||||
}
|
||||
|
||||
// NewListOpts Create a new ListOpts with the specified validator.
|
||||
// NewListOpts creates a new ListOpts with the specified validator.
|
||||
func NewListOpts(validator ValidatorFctType) ListOpts {
|
||||
var values []string
|
||||
return *NewListOptsRef(&values, validator)
|
||||
}
|
||||
|
||||
// NewListOptsRef creates a new ListOpts with the specified values and validator.
|
||||
func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
|
||||
return &ListOpts{
|
||||
values: values,
|
||||
|
@@ -64,7 +73,7 @@ func (opts *ListOpts) Set(value string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Delete remove the given element from the slice.
|
||||
// Delete removes the specified element from the slice.
|
||||
func (opts *ListOpts) Delete(key string) {
|
||||
for i, k := range *opts.values {
|
||||
if k == key {
|
||||
|
@@ -76,7 +85,6 @@ func (opts *ListOpts) Delete(key string) {
|
|||
|
||||
// GetMap returns the content of values in a map in order to avoid
|
||||
// duplicates.
|
||||
// FIXME: can we remove this?
|
||||
func (opts *ListOpts) GetMap() map[string]struct{} {
|
||||
ret := make(map[string]struct{})
|
||||
for _, k := range *opts.values {
|
||||
|
@@ -85,13 +93,12 @@ func (opts *ListOpts) GetMap() map[string]struct{} {
|
|||
return ret
|
||||
}
|
||||
|
||||
// GetAll returns the values' slice.
|
||||
// FIXME: Can we remove this?
|
||||
// GetAll returns the values of slice.
|
||||
func (opts *ListOpts) GetAll() []string {
|
||||
return (*opts.values)
|
||||
}
|
||||
|
||||
// Get checks the existence of the given key.
|
||||
// Get checks the existence of the specified key.
|
||||
func (opts *ListOpts) Get(key string) bool {
|
||||
for _, k := range *opts.values {
|
||||
if k == key {
|
||||
|
@@ -106,7 +113,7 @@ func (opts *ListOpts) Len() int {
|
|||
return len((*opts.values))
|
||||
}
|
||||
|
||||
//MapOpts type that holds a map of values and a validation function.
|
||||
//MapOpts holds a map of values and a validation function.
|
||||
type MapOpts struct {
|
||||
values map[string]string
|
||||
validator ValidatorFctType
|
||||
|
@@ -131,10 +138,16 @@ func (opts *MapOpts) Set(value string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// GetAll returns the values of MapOpts as a map.
|
||||
func (opts *MapOpts) GetAll() map[string]string {
|
||||
return opts.values
|
||||
}
|
||||
|
||||
func (opts *MapOpts) String() string {
|
||||
return fmt.Sprintf("%v", map[string]string((opts.values)))
|
||||
}
|
||||
|
||||
// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
|
||||
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
|
||||
if values == nil {
|
||||
values = make(map[string]string)
|
||||
|
@ -145,13 +158,13 @@ func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
|
|||
}
|
||||
}
|
||||
|
||||
// ValidatorFctType validator that return a validate string and/or an error
|
||||
// ValidatorFctType defines a validator function that returns a validated string and/or an error.
|
||||
type ValidatorFctType func(val string) (string, error)
|
||||
|
||||
// ValidatorFctListType validator that return a validate list of string and/or an error
|
||||
// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
|
||||
type ValidatorFctListType func(val string) ([]string, error)
|
||||
|
||||
// ValidateAttach Validates that the specified string is a valid attach option.
|
||||
// ValidateAttach validates that the specified string is a valid attach option.
|
||||
func ValidateAttach(val string) (string, error) {
|
||||
s := strings.ToLower(val)
|
||||
for _, str := range []string{"stdin", "stdout", "stderr"} {
|
||||
|
@ -162,7 +175,7 @@ func ValidateAttach(val string) (string, error) {
|
|||
return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR")
|
||||
}
|
||||
|
||||
// ValidateLink Validates that the specified string has a valid link format (containerName:alias).
|
||||
// ValidateLink validates that the specified string has a valid link format (containerName:alias).
|
||||
func ValidateLink(val string) (string, error) {
|
||||
if _, _, err := parsers.ParseLink(val); err != nil {
|
||||
return val, err
|
||||
|
@ -170,53 +183,66 @@ func ValidateLink(val string) (string, error) {
|
|||
return val, nil
|
||||
}
|
||||
|
||||
// ValidateDevice Validate a path for devices
// ValidDeviceMode checks if the mode for device is valid or not.
// Valid mode is a composition of r (read), w (write), and m (mknod).
func ValidDeviceMode(mode string) bool {
    var legalDeviceMode = map[rune]bool{
        'r': true,
        'w': true,
        'm': true,
    }
    if mode == "" {
        return false
    }
    for _, c := range mode {
        if !legalDeviceMode[c] {
            return false
        }
        legalDeviceMode[c] = false
    }
    return true
}

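An illustrative sketch of the device-mode check introduced above: each of r, w and m may appear at most once, and an empty mode is rejected. This is a standalone approximation of ValidDeviceMode, not part of the diff itself:

```go
package main

import "fmt"

// validDeviceMode mirrors the ValidDeviceMode helper shown above: a mode is a
// combination of 'r', 'w' and 'm', each used at most once, and must be non-empty.
func validDeviceMode(mode string) bool {
	legal := map[rune]bool{'r': true, 'w': true, 'm': true}
	if mode == "" {
		return false
	}
	for _, c := range mode {
		if !legal[c] {
			return false
		}
		legal[c] = false // reject duplicates such as "rr"
	}
	return true
}

func main() {
	for _, m := range []string{"rwm", "ro", "rr", ""} {
		fmt.Printf("%-5q -> %v\n", m, validDeviceMode(m))
	}
}
```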
// ValidateDevice validates a path for devices
// It will make sure 'val' is in the form:
// [host-dir:]container-path[:mode]
// It also validates the device mode.
func ValidateDevice(val string) (string, error) {
    return validatePath(val, false)
    return validatePath(val, ValidDeviceMode)
}

// ValidatePath Validate a path for volumes
// It will make sure 'val' is in the form:
// [host-dir:]container-path[:rw|ro]
// It will also validate the mount mode.
func ValidatePath(val string) (string, error) {
    return validatePath(val, true)
}

func validatePath(val string, validateMountMode bool) (string, error) {
|
||||
func validatePath(val string, validator func(string) bool) (string, error) {
|
||||
var containerPath string
|
||||
var mode string
|
||||
|
||||
if strings.Count(val, ":") > 2 {
|
||||
return val, fmt.Errorf("bad format for volumes: %s", val)
|
||||
return val, fmt.Errorf("bad format for path: %s", val)
|
||||
}
|
||||
|
||||
splited := strings.SplitN(val, ":", 3)
|
||||
if splited[0] == "" {
|
||||
return val, fmt.Errorf("bad format for volumes: %s", val)
|
||||
split := strings.SplitN(val, ":", 3)
|
||||
if split[0] == "" {
|
||||
return val, fmt.Errorf("bad format for path: %s", val)
|
||||
}
|
||||
switch len(splited) {
|
||||
switch len(split) {
|
||||
case 1:
|
||||
containerPath = splited[0]
|
||||
containerPath = split[0]
|
||||
val = path.Clean(containerPath)
|
||||
case 2:
|
||||
if isValid, _ := volume.ValidateMountMode(splited[1]); validateMountMode && isValid {
|
||||
containerPath = splited[0]
|
||||
mode = splited[1]
|
||||
if isValid := validator(split[1]); isValid {
|
||||
containerPath = split[0]
|
||||
mode = split[1]
|
||||
val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode)
|
||||
} else {
|
||||
containerPath = splited[1]
|
||||
val = fmt.Sprintf("%s:%s", splited[0], path.Clean(containerPath))
|
||||
containerPath = split[1]
|
||||
val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath))
|
||||
}
|
||||
case 3:
|
||||
containerPath = splited[1]
|
||||
mode = splited[2]
|
||||
if isValid, _ := volume.ValidateMountMode(splited[2]); validateMountMode && !isValid {
|
||||
return val, fmt.Errorf("bad mount mode specified : %s", mode)
|
||||
containerPath = split[1]
|
||||
mode = split[2]
|
||||
if isValid := validator(split[2]); !isValid {
|
||||
return val, fmt.Errorf("bad mode specified: %s", mode)
|
||||
}
|
||||
val = fmt.Sprintf("%s:%s:%s", splited[0], containerPath, mode)
|
||||
val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode)
|
||||
}
|
||||
|
||||
if !path.IsAbs(containerPath) {
|
||||
|
@ -225,24 +251,24 @@ func validatePath(val string, validateMountMode bool) (string, error) {
|
|||
return val, nil
|
||||
}
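For reference, a hedged sketch of the path formats the validators above accept, drawn from the valid cases in the tests removed later in this diff. The vendored import path is an assumption based on the other imports in this commit:

```go
package main

import (
	"fmt"

	// Assumed vendored import path for the opts package shown in this diff.
	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	// Volume specs follow [host-dir:]container-path[:rw|ro].
	volumes := []string{"/home", "/home:/something/else", "/hostPath:/containerPath:rw"}
	for _, v := range volumes {
		if _, err := opts.ValidatePath(v); err != nil {
			fmt.Printf("rejected volume %q: %v\n", v, err)
		}
	}

	// Device specs follow [host-dir:]container-path[:mode], mode built from r, w, m.
	devices := []string{"/hostPath:/containerPath:mrw", "/dev/fuse"}
	for _, d := range devices {
		if _, err := opts.ValidateDevice(d); err != nil {
			fmt.Printf("rejected device %q: %v\n", d, err)
		}
	}
}
```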
|
||||
|
||||
// ValidateEnv Validate an environment variable and returns it
|
||||
// It will use EnvironmentVariableRegexp to ensure the name of the environment variable is valid.
|
||||
// ValidateEnv validates an environment variable and returns it.
|
||||
// If no value is specified, it returns the current value using os.Getenv.
|
||||
//
|
||||
// As on ParseEnvFile and related to #16585, environment variable names
|
||||
// are not validate what so ever, it's up to application inside docker
|
||||
// to validate them or not.
|
||||
func ValidateEnv(val string) (string, error) {
|
||||
arr := strings.Split(val, "=")
|
||||
if len(arr) > 1 {
|
||||
return val, nil
|
||||
}
|
||||
if !EnvironmentVariableRegexp.MatchString(arr[0]) {
|
||||
return val, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", val)}
|
||||
}
|
||||
if !doesEnvExist(val) {
|
||||
return val, nil
|
||||
}
|
||||
return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
|
||||
}
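A brief usage sketch for ValidateEnv as shown above: values containing '=' pass through unchanged, while a bare variable name that exists in the current environment is expanded via os.Getenv (import path assumed as in the previous sketch):

```go
package main

import (
	"fmt"
	"os"

	// Assumed vendored import path for the opts package shown in this diff.
	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	os.Setenv("EDITOR", "vi")

	v, _ := opts.ValidateEnv("FOO=bar") // contains '=': returned as-is
	fmt.Println(v)                      // FOO=bar

	v, _ = opts.ValidateEnv("EDITOR") // bare name: expanded from the environment
	fmt.Println(v)                    // EDITOR=vi
}
```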
|
||||
|
||||
// ValidateIPAddress Validates an Ip address
|
||||
// ValidateIPAddress validates an Ip address.
|
||||
func ValidateIPAddress(val string) (string, error) {
|
||||
var ip = net.ParseIP(strings.TrimSpace(val))
|
||||
if ip != nil {
|
||||
|
@ -251,7 +277,7 @@ func ValidateIPAddress(val string) (string, error) {
|
|||
return "", fmt.Errorf("%s is not an ip address", val)
|
||||
}
|
||||
|
||||
// ValidateMACAddress Validates a MAC address
|
||||
// ValidateMACAddress validates a MAC address.
|
||||
func ValidateMACAddress(val string) (string, error) {
|
||||
_, err := net.ParseMAC(strings.TrimSpace(val))
|
||||
if err != nil {
|
||||
|
@ -260,8 +286,8 @@ func ValidateMACAddress(val string) (string, error) {
|
|||
return val, nil
|
||||
}
|
||||
|
||||
// ValidateDNSSearch Validates domain for resolvconf search configuration.
|
||||
// A zero length domain is represented by .
|
||||
// ValidateDNSSearch validates domain for resolvconf search configuration.
|
||||
// A zero length domain is represented by a dot (.).
|
||||
func ValidateDNSSearch(val string) (string, error) {
|
||||
if val = strings.Trim(val, " "); val == "." {
|
||||
return val, nil
|
||||
|
@ -280,8 +306,8 @@ func validateDomain(val string) (string, error) {
|
|||
return "", fmt.Errorf("%s is not a valid domain", val)
|
||||
}
|
||||
|
||||
// ValidateExtraHost Validate that the given string is a valid extrahost and returns it
|
||||
// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6)
|
||||
// ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
|
||||
// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6).
|
||||
func ValidateExtraHost(val string) (string, error) {
|
||||
// allow for IPv6 addresses in extra hosts by only splitting on first ":"
|
||||
arr := strings.SplitN(val, ":", 2)
|
||||
|
@ -294,8 +320,8 @@ func ValidateExtraHost(val string) (string, error) {
|
|||
return val, nil
|
||||
}
|
||||
|
||||
// ValidateLabel Validate that the given string is a valid label, and returns it
|
||||
// Labels are in the form on key=value
|
||||
// ValidateLabel validates that the specified string is a valid label, and returns it.
|
||||
// Labels are in the form on key=value.
|
||||
func ValidateLabel(val string) (string, error) {
|
||||
if strings.Count(val, "=") < 1 {
|
||||
return "", fmt.Errorf("bad attribute format: %s", val)
|
||||
|
@ -303,9 +329,20 @@ func ValidateLabel(val string) (string, error) {
|
|||
return val, nil
|
||||
}
|
||||
|
||||
// ValidateHost Validate that the given string is a valid host and returns it
|
||||
// ValidateHost validates that the specified string is a valid host and returns it.
|
||||
func ValidateHost(val string) (string, error) {
|
||||
host, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val)
|
||||
_, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val)
|
||||
if err != nil {
|
||||
return val, err
|
||||
}
|
||||
// Note: unlike most flag validators, we don't return the mutated value here
|
||||
// we need to know what the user entered later (using ParseHost) to adjust for tls
|
||||
return val, nil
|
||||
}
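As the note above says, ValidateHost now only checks the address and hands back the caller's original string; resolving TLS-dependent defaults is deferred to ParseHost. A hedged sketch (import path assumed):

```go
package main

import (
	"fmt"

	// Assumed vendored import path for the opts package shown in this diff.
	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
)

func main() {
	// The input is returned unmodified on success; defaults are applied later
	// by ParseHost rather than here.
	if host, err := opts.ValidateHost("tcp://:2375"); err == nil {
		fmt.Println(host) // tcp://:2375
	}

	if _, err := opts.ValidateHost("unknown://"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```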
|
||||
|
||||
// ParseHost and set defaults for a Daemon host string
|
||||
func ParseHost(defaultHost, val string) (string, error) {
|
||||
host, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val)
|
||||
if err != nil {
|
||||
return val, err
|
||||
}
|
||||
|
|
|
@ -1,479 +0,0 @@
|
|||
package opts
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestValidateIPAddress(t *testing.T) {
|
||||
if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" {
|
||||
t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err)
|
||||
}
|
||||
|
||||
if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" {
|
||||
t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err)
|
||||
}
|
||||
|
||||
if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" {
|
||||
t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err)
|
||||
}
|
||||
|
||||
if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" {
|
||||
t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err)
|
||||
}
|
||||
|
||||
if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" {
|
||||
t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestMapOpts(t *testing.T) {
|
||||
tmpMap := make(map[string]string)
|
||||
o := NewMapOpts(tmpMap, logOptsValidator)
|
||||
o.Set("max-size=1")
|
||||
if o.String() != "map[max-size:1]" {
|
||||
t.Errorf("%s != [map[max-size:1]", o.String())
|
||||
}
|
||||
|
||||
o.Set("max-file=2")
|
||||
if len(tmpMap) != 2 {
|
||||
t.Errorf("map length %d != 2", len(tmpMap))
|
||||
}
|
||||
|
||||
if tmpMap["max-file"] != "2" {
|
||||
t.Errorf("max-file = %s != 2", tmpMap["max-file"])
|
||||
}
|
||||
|
||||
if tmpMap["max-size"] != "1" {
|
||||
t.Errorf("max-size = %s != 1", tmpMap["max-size"])
|
||||
}
|
||||
if o.Set("dummy-val=3") == nil {
|
||||
t.Errorf("validator is not being called")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateMACAddress(t *testing.T) {
|
||||
if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil {
|
||||
t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err)
|
||||
}
|
||||
|
||||
if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil {
|
||||
t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC")
|
||||
}
|
||||
|
||||
if _, err := ValidateMACAddress(`random invalid string`); err == nil {
|
||||
t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC")
|
||||
}
|
||||
}
|
||||
|
||||
func TestListOptsWithoutValidator(t *testing.T) {
|
||||
o := NewListOpts(nil)
|
||||
o.Set("foo")
|
||||
if o.String() != "[foo]" {
|
||||
t.Errorf("%s != [foo]", o.String())
|
||||
}
|
||||
o.Set("bar")
|
||||
if o.Len() != 2 {
|
||||
t.Errorf("%d != 2", o.Len())
|
||||
}
|
||||
o.Set("bar")
|
||||
if o.Len() != 3 {
|
||||
t.Errorf("%d != 3", o.Len())
|
||||
}
|
||||
if !o.Get("bar") {
|
||||
t.Error("o.Get(\"bar\") == false")
|
||||
}
|
||||
if o.Get("baz") {
|
||||
t.Error("o.Get(\"baz\") == true")
|
||||
}
|
||||
o.Delete("foo")
|
||||
if o.String() != "[bar bar]" {
|
||||
t.Errorf("%s != [bar bar]", o.String())
|
||||
}
|
||||
listOpts := o.GetAll()
|
||||
if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" {
|
||||
t.Errorf("Expected [[bar bar]], got [%v]", listOpts)
|
||||
}
|
||||
mapListOpts := o.GetMap()
|
||||
if len(mapListOpts) != 1 {
|
||||
t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestListOptsWithValidator(t *testing.T) {
|
||||
// Re-using logOptsvalidator (used by MapOpts)
|
||||
o := NewListOpts(logOptsValidator)
|
||||
o.Set("foo")
|
||||
if o.String() != "[]" {
|
||||
t.Errorf("%s != []", o.String())
|
||||
}
|
||||
o.Set("foo=bar")
|
||||
if o.String() != "[]" {
|
||||
t.Errorf("%s != []", o.String())
|
||||
}
|
||||
o.Set("max-file=2")
|
||||
if o.Len() != 1 {
|
||||
t.Errorf("%d != 1", o.Len())
|
||||
}
|
||||
if !o.Get("max-file=2") {
|
||||
t.Error("o.Get(\"max-file=2\") == false")
|
||||
}
|
||||
if o.Get("baz") {
|
||||
t.Error("o.Get(\"baz\") == true")
|
||||
}
|
||||
o.Delete("max-file=2")
|
||||
if o.String() != "[]" {
|
||||
t.Errorf("%s != []", o.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateDNSSearch(t *testing.T) {
|
||||
valid := []string{
|
||||
`.`,
|
||||
`a`,
|
||||
`a.`,
|
||||
`1.foo`,
|
||||
`17.foo`,
|
||||
`foo.bar`,
|
||||
`foo.bar.baz`,
|
||||
`foo.bar.`,
|
||||
`foo.bar.baz`,
|
||||
`foo1.bar2`,
|
||||
`foo1.bar2.baz`,
|
||||
`1foo.2bar.`,
|
||||
`1foo.2bar.baz`,
|
||||
`foo-1.bar-2`,
|
||||
`foo-1.bar-2.baz`,
|
||||
`foo-1.bar-2.`,
|
||||
`foo-1.bar-2.baz`,
|
||||
`1-foo.2-bar`,
|
||||
`1-foo.2-bar.baz`,
|
||||
`1-foo.2-bar.`,
|
||||
`1-foo.2-bar.baz`,
|
||||
}
|
||||
|
||||
invalid := []string{
|
||||
``,
|
||||
` `,
|
||||
` `,
|
||||
`17`,
|
||||
`17.`,
|
||||
`.17`,
|
||||
`17-.`,
|
||||
`17-.foo`,
|
||||
`.foo`,
|
||||
`foo-.bar`,
|
||||
`-foo.bar`,
|
||||
`foo.bar-`,
|
||||
`foo.bar-.baz`,
|
||||
`foo.-bar`,
|
||||
`foo.-bar.baz`,
|
||||
`foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`,
|
||||
}
|
||||
|
||||
for _, domain := range valid {
|
||||
if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" {
|
||||
t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, domain := range invalid {
|
||||
if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" {
|
||||
t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateExtraHosts(t *testing.T) {
|
||||
valid := []string{
|
||||
`myhost:192.168.0.1`,
|
||||
`thathost:10.0.2.1`,
|
||||
`anipv6host:2003:ab34:e::1`,
|
||||
`ipv6local:::1`,
|
||||
}
|
||||
|
||||
invalid := map[string]string{
|
||||
`myhost:192.notanipaddress.1`: `invalid IP`,
|
||||
`thathost-nosemicolon10.0.0.1`: `bad format`,
|
||||
`anipv6host:::::1`: `invalid IP`,
|
||||
`ipv6local:::0::`: `invalid IP`,
|
||||
}
|
||||
|
||||
for _, extrahost := range valid {
|
||||
if _, err := ValidateExtraHost(extrahost); err != nil {
|
||||
t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
for extraHost, expectedError := range invalid {
|
||||
if _, err := ValidateExtraHost(extraHost); err == nil {
|
||||
t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost)
|
||||
} else {
|
||||
if !strings.Contains(err.Error(), expectedError) {
|
||||
t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateAttach(t *testing.T) {
|
||||
valid := []string{
|
||||
"stdin",
|
||||
"stdout",
|
||||
"stderr",
|
||||
"STDIN",
|
||||
"STDOUT",
|
||||
"STDERR",
|
||||
}
|
||||
if _, err := ValidateAttach("invalid"); err == nil {
|
||||
t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing")
|
||||
}
|
||||
|
||||
for _, attach := range valid {
|
||||
value, err := ValidateAttach(attach)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if value != strings.ToLower(attach) {
|
||||
t.Fatalf("Expected [%v], got [%v]", attach, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateLink(t *testing.T) {
|
||||
valid := []string{
|
||||
"name",
|
||||
"dcdfbe62ecd0:alias",
|
||||
"7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da",
|
||||
"angry_torvalds:linus",
|
||||
}
|
||||
invalid := map[string]string{
|
||||
"": "empty string specified for links",
|
||||
"too:much:of:it": "bad format for links: too:much:of:it",
|
||||
}
|
||||
|
||||
for _, link := range valid {
|
||||
if _, err := ValidateLink(link); err != nil {
|
||||
t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err)
|
||||
}
|
||||
}
|
||||
|
||||
for link, expectedError := range invalid {
|
||||
if _, err := ValidateLink(link); err == nil {
|
||||
t.Fatalf("ValidateLink(`%q`) should have failed validation", link)
|
||||
} else {
|
||||
if !strings.Contains(err.Error(), expectedError) {
|
||||
t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatePath(t *testing.T) {
|
||||
valid := []string{
|
||||
"/home",
|
||||
"/home:/home",
|
||||
"/home:/something/else",
|
||||
"/with space",
|
||||
"/home:/with space",
|
||||
"relative:/absolute-path",
|
||||
"hostPath:/containerPath:ro",
|
||||
"/hostPath:/containerPath:rw",
|
||||
"/rw:/ro",
|
||||
"/path:rw",
|
||||
"/path:ro",
|
||||
"/rw:rw",
|
||||
}
|
||||
invalid := map[string]string{
|
||||
"": "bad format for volumes: ",
|
||||
"./": "./ is not an absolute path",
|
||||
"../": "../ is not an absolute path",
|
||||
"/:../": "../ is not an absolute path",
|
||||
"/:path": "path is not an absolute path",
|
||||
":": "bad format for volumes: :",
|
||||
"/tmp:": " is not an absolute path",
|
||||
":test": "bad format for volumes: :test",
|
||||
":/test": "bad format for volumes: :/test",
|
||||
"tmp:": " is not an absolute path",
|
||||
":test:": "bad format for volumes: :test:",
|
||||
"::": "bad format for volumes: ::",
|
||||
":::": "bad format for volumes: :::",
|
||||
"/tmp:::": "bad format for volumes: /tmp:::",
|
||||
":/tmp::": "bad format for volumes: :/tmp::",
|
||||
"path:ro": "path is not an absolute path",
|
||||
"/path:/path:sw": "bad mount mode specified : sw",
|
||||
"/path:/path:rwz": "bad mount mode specified : rwz",
|
||||
}
|
||||
|
||||
for _, path := range valid {
|
||||
if _, err := ValidatePath(path); err != nil {
|
||||
t.Fatalf("ValidatePath(`%q`) should succeed: error %q", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
for path, expectedError := range invalid {
|
||||
if _, err := ValidatePath(path); err == nil {
|
||||
t.Fatalf("ValidatePath(`%q`) should have failed validation", path)
|
||||
} else {
|
||||
if err.Error() != expectedError {
|
||||
t.Fatalf("ValidatePath(`%q`) error should contain %q, got %q", path, expectedError, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
func TestValidateDevice(t *testing.T) {
|
||||
valid := []string{
|
||||
"/home",
|
||||
"/home:/home",
|
||||
"/home:/something/else",
|
||||
"/with space",
|
||||
"/home:/with space",
|
||||
"relative:/absolute-path",
|
||||
"hostPath:/containerPath:ro",
|
||||
"/hostPath:/containerPath:rw",
|
||||
"/hostPath:/containerPath:mrw",
|
||||
}
|
||||
invalid := map[string]string{
|
||||
"": "bad format for volumes: ",
|
||||
"./": "./ is not an absolute path",
|
||||
"../": "../ is not an absolute path",
|
||||
"/:../": "../ is not an absolute path",
|
||||
"/:path": "path is not an absolute path",
|
||||
":": "bad format for volumes: :",
|
||||
"/tmp:": " is not an absolute path",
|
||||
":test": "bad format for volumes: :test",
|
||||
":/test": "bad format for volumes: :/test",
|
||||
"tmp:": " is not an absolute path",
|
||||
":test:": "bad format for volumes: :test:",
|
||||
"::": "bad format for volumes: ::",
|
||||
":::": "bad format for volumes: :::",
|
||||
"/tmp:::": "bad format for volumes: /tmp:::",
|
||||
":/tmp::": "bad format for volumes: :/tmp::",
|
||||
"path:ro": "ro is not an absolute path",
|
||||
}
|
||||
|
||||
for _, path := range valid {
|
||||
if _, err := ValidateDevice(path); err != nil {
|
||||
t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err)
|
||||
}
|
||||
}
|
||||
|
||||
for path, expectedError := range invalid {
|
||||
if _, err := ValidateDevice(path); err == nil {
|
||||
t.Fatalf("ValidateDevice(`%q`) should have failed validation", path)
|
||||
} else {
|
||||
if err.Error() != expectedError {
|
||||
t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateEnv(t *testing.T) {
|
||||
invalids := map[string]string{
|
||||
"some spaces": "poorly formatted environment: variable 'some spaces' is not a valid environment variable",
|
||||
"asd!qwe": "poorly formatted environment: variable 'asd!qwe' is not a valid environment variable",
|
||||
"1asd": "poorly formatted environment: variable '1asd' is not a valid environment variable",
|
||||
"123": "poorly formatted environment: variable '123' is not a valid environment variable",
|
||||
}
|
||||
valids := map[string]string{
|
||||
"a": "a",
|
||||
"something": "something",
|
||||
"_=a": "_=a",
|
||||
"env1=value1": "env1=value1",
|
||||
"_env1=value1": "_env1=value1",
|
||||
"env2=value2=value3": "env2=value2=value3",
|
||||
"env3=abc!qwe": "env3=abc!qwe",
|
||||
"env_4=value 4": "env_4=value 4",
|
||||
"PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")),
|
||||
"PATH=something": "PATH=something",
|
||||
}
|
||||
for value, expectedError := range invalids {
|
||||
_, err := ValidateEnv(value)
|
||||
if err == nil {
|
||||
t.Fatalf("Expected ErrBadEnvVariable, got nothing")
|
||||
}
|
||||
if _, ok := err.(ErrBadEnvVariable); !ok {
|
||||
t.Fatalf("Expected ErrBadEnvVariable, got [%s]", err)
|
||||
}
|
||||
if err.Error() != expectedError {
|
||||
t.Fatalf("Expected ErrBadEnvVariable with message [%s], got [%s]", expectedError, err.Error())
|
||||
}
|
||||
}
|
||||
for value, expected := range valids {
|
||||
actual, err := ValidateEnv(value)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if actual != expected {
|
||||
t.Fatalf("Expected [%v], got [%v]", expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateLabel(t *testing.T) {
|
||||
if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" {
|
||||
t.Fatalf("Expected an error [bad attribute format: label], go %v", err)
|
||||
}
|
||||
if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" {
|
||||
t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err)
|
||||
}
|
||||
// Validate it's working with more than one =
|
||||
if actual, err := ValidateLabel("key1=value1=value2"); err != nil {
|
||||
t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err)
|
||||
}
|
||||
// Validate it's working with one more
|
||||
if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil {
|
||||
t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateHost(t *testing.T) {
|
||||
invalid := map[string]string{
|
||||
"anything": "Invalid bind address format: anything",
|
||||
"something with spaces": "Invalid bind address format: something with spaces",
|
||||
"://": "Invalid bind address format: ://",
|
||||
"unknown://": "Invalid bind address format: unknown://",
|
||||
"tcp://": "Invalid proto, expected tcp: ",
|
||||
"tcp://:port": "Invalid bind address format: :port",
|
||||
"tcp://invalid": "Invalid bind address format: invalid",
|
||||
"tcp://invalid:port": "Invalid bind address format: invalid:port",
|
||||
}
|
||||
valid := map[string]string{
|
||||
"fd://": "fd://",
|
||||
"fd://something": "fd://something",
|
||||
"tcp://:2375": "tcp://127.0.0.1:2375", // default ip address
|
||||
"tcp://:2376": "tcp://127.0.0.1:2376", // default ip address
|
||||
"tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080",
|
||||
"tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000",
|
||||
"tcp://192.168:8080": "tcp://192.168:8080",
|
||||
"tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P
|
||||
"tcp://docker.com:2375": "tcp://docker.com:2375",
|
||||
"unix://": "unix:///var/run/docker.sock", // default unix:// value
|
||||
"unix://path/to/socket": "unix://path/to/socket",
|
||||
}
|
||||
|
||||
for value, errorMessage := range invalid {
|
||||
if _, err := ValidateHost(value); err == nil || err.Error() != errorMessage {
|
||||
t.Fatalf("Expected an error for %v with [%v], got [%v]", value, errorMessage, err)
|
||||
}
|
||||
}
|
||||
for value, expected := range valid {
|
||||
if actual, err := ValidateHost(value); err != nil || actual != expected {
|
||||
t.Fatalf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func logOptsValidator(val string) (string, error) {
|
||||
allowedKeys := map[string]string{"max-size": "1", "max-file": "2"}
|
||||
vals := strings.Split(val, "=")
|
||||
if allowedKeys[vals[0]] != "" {
|
||||
return val, nil
|
||||
}
|
||||
return "", fmt.Errorf("invalid key %s", vals[0])
|
||||
}
|
|
@ -6,10 +6,12 @@ import (
|
|||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit"
|
||||
)
|
||||
|
||||
// UlimitOpt defines a map of Ulimits
type UlimitOpt struct {
    values *map[string]*ulimit.Ulimit
}

// NewUlimitOpt creates a new UlimitOpt
func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt {
    if ref == nil {
        ref = &map[string]*ulimit.Ulimit{}
@ -17,6 +19,7 @@ func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt {
    return &UlimitOpt{ref}
}
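A short usage sketch for the UlimitOpt flag type documented above, mirroring the test removed later in this diff; the vendored import paths are assumptions based on the imports used in this file:

```go
package main

import (
	"fmt"

	// Assumed vendored import paths matching the ones used in this file.
	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit"
)

func main() {
	// Passing nil lets NewUlimitOpt allocate its own backing map.
	o := opts.NewUlimitOpt(nil)

	if err := o.Set("nofile=512:1024"); err != nil { // assumed soft:hard ordering
		panic(err)
	}
	if err := o.Set("core=1024:1024"); err != nil {
		panic(err)
	}

	fmt.Println(o.String())                  // e.g. [nofile=512:1024 core=1024:1024]
	fmt.Println(len(o.GetList()), "ulimits") // 2 ulimits
}
```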
|
||||
|
||||
// Set validates a Ulimit and sets its name as a key in UlimitOpt
|
||||
func (o *UlimitOpt) Set(val string) error {
|
||||
l, err := ulimit.Parse(val)
|
||||
if err != nil {
|
||||
|
@ -28,6 +31,7 @@ func (o *UlimitOpt) Set(val string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// String returns Ulimit values as a string.
|
||||
func (o *UlimitOpt) String() string {
|
||||
var out []string
|
||||
for _, v := range *o.values {
|
||||
|
@ -37,6 +41,7 @@ func (o *UlimitOpt) String() string {
|
|||
return fmt.Sprintf("%v", out)
|
||||
}
|
||||
|
||||
// GetList returns a slice of pointers to Ulimits.
|
||||
func (o *UlimitOpt) GetList() []*ulimit.Ulimit {
|
||||
var ulimits []*ulimit.Ulimit
|
||||
for _, v := range *o.values {
|
||||
|
|
|
@ -1,42 +0,0 @@
|
|||
package opts
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit"
|
||||
)
|
||||
|
||||
func TestUlimitOpt(t *testing.T) {
|
||||
ulimitMap := map[string]*ulimit.Ulimit{
|
||||
"nofile": {"nofile", 1024, 512},
|
||||
}
|
||||
|
||||
ulimitOpt := NewUlimitOpt(&ulimitMap)
|
||||
|
||||
expected := "[nofile=512:1024]"
|
||||
if ulimitOpt.String() != expected {
|
||||
t.Fatalf("Expected %v, got %v", expected, ulimitOpt)
|
||||
}
|
||||
|
||||
// Valid ulimit append to opts
|
||||
if err := ulimitOpt.Set("core=1024:1024"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Invalid ulimit type returns an error and do not append to opts
|
||||
if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil {
|
||||
t.Fatalf("Expected error on invalid ulimit type")
|
||||
}
|
||||
expected = "[nofile=512:1024 core=1024:1024]"
|
||||
expected2 := "[core=1024:1024 nofile=512:1024]"
|
||||
result := ulimitOpt.String()
|
||||
if result != expected && result != expected2 {
|
||||
t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt)
|
||||
}
|
||||
|
||||
// And test GetList
|
||||
ulimits := ulimitOpt.GetList()
|
||||
if len(ulimits) != 2 {
|
||||
t.Fatalf("Expected a ulimit list of 2, got %v", ulimits)
|
||||
}
|
||||
}
|
|
@ -19,35 +19,50 @@ import (
|
|||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
type (
|
||||
Archive io.ReadCloser
|
||||
ArchiveReader io.Reader
|
||||
Compression int
|
||||
// Archive is a type of io.ReadCloser which has two interfaces Read and Closer.
|
||||
Archive io.ReadCloser
|
||||
// Reader is a type of io.Reader.
|
||||
Reader io.Reader
|
||||
// Compression is the state represtents if compressed or not.
|
||||
Compression int
|
||||
// TarChownOptions wraps the chown options UID and GID.
|
||||
TarChownOptions struct {
|
||||
UID, GID int
|
||||
}
|
||||
// TarOptions wraps the tar options.
|
||||
TarOptions struct {
|
||||
IncludeFiles []string
|
||||
ExcludePatterns []string
|
||||
Compression Compression
|
||||
NoLchown bool
|
||||
UIDMaps []idtools.IDMap
|
||||
GIDMaps []idtools.IDMap
|
||||
ChownOpts *TarChownOptions
|
||||
Name string
|
||||
IncludeSourceDir bool
|
||||
// When unpacking, specifies whether overwriting a directory with a
|
||||
// non-directory is allowed and vice versa.
|
||||
NoOverwriteDirNonDir bool
|
||||
// For each include when creating an archive, the included name will be
|
||||
// replaced with the matching name from this map.
|
||||
RebaseNames map[string]string
|
||||
}
|
||||
|
||||
// Archiver allows the reuse of most utility functions of this package
|
||||
// with a pluggable Untar function.
|
||||
// with a pluggable Untar function. Also, to facilitate the passing of
|
||||
// specific id mappings for untar, an archiver can be created with maps
|
||||
// which will then be passed to Untar operations
|
||||
Archiver struct {
|
||||
Untar func(io.Reader, string, *TarOptions) error
|
||||
Untar func(io.Reader, string, *TarOptions) error
|
||||
UIDMaps []idtools.IDMap
|
||||
GIDMaps []idtools.IDMap
|
||||
}
|
||||
|
||||
// breakoutError is used to differentiate errors related to breaking out
|
||||
|
@ -57,17 +72,23 @@ type (
|
|||
)
|
||||
|
||||
var (
|
||||
// ErrNotImplemented is the error message of function not implemented.
|
||||
ErrNotImplemented = errors.New("Function not implemented")
|
||||
defaultArchiver = &Archiver{Untar}
|
||||
defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
|
||||
)
|
||||
|
||||
const (
|
||||
// Uncompressed represents the uncompressed.
|
||||
Uncompressed Compression = iota
|
||||
// Bzip2 is bzip2 compression algorithm.
|
||||
Bzip2
|
||||
// Gzip is gzip compression algorithm.
|
||||
Gzip
|
||||
// Xz is xz compression algorithm.
|
||||
Xz
|
||||
)
|
||||
|
||||
// IsArchive checks if it is a archive by the header.
|
||||
func IsArchive(header []byte) bool {
|
||||
compression := DetectCompression(header)
|
||||
if compression != Uncompressed {
|
||||
|
@ -78,6 +99,7 @@ func IsArchive(header []byte) bool {
|
|||
return err == nil
|
||||
}
|
||||
|
||||
// DetectCompression detects the compression algorithm of the source.
|
||||
func DetectCompression(source []byte) Compression {
|
||||
for compression, m := range map[Compression][]byte{
|
||||
Bzip2: {0x42, 0x5A, 0x68},
|
||||
|
@ -95,12 +117,13 @@ func DetectCompression(source []byte) Compression {
|
|||
return Uncompressed
|
||||
}
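The compression constants above drive DetectCompression, which matches magic bytes at the start of a stream. A small sketch follows; only the bzip2 bytes appear verbatim in this diff, the gzip magic is the standard one and is an assumption here, as is the vendored import path:

```go
package main

import (
	"fmt"

	// Assumed vendored import path for the archive package shown in this diff.
	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	samples := map[string][]byte{
		"bzip2": {0x42, 0x5A, 0x68},       // magic shown in the map above
		"gzip":  {0x1F, 0x8B, 0x08},       // assumed standard gzip magic
		"plain": []byte("just some text"), // no known magic -> Uncompressed
	}
	for name, header := range samples {
		c := archive.DetectCompression(header)
		fmt.Printf("%s: %v (extension %q)\n", name, c, c.Extension())
	}
}
```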
|
||||
|
||||
func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
|
||||
func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
|
||||
args := []string{"xz", "-d", "-c", "-q"}
|
||||
|
||||
return CmdStream(exec.Command(args[0], args[1:]...), archive)
|
||||
return cmdStream(exec.Command(args[0], args[1:]...), archive)
|
||||
}
|
||||
|
||||
// DecompressStream decompress the archive and returns a ReaderCloser with the decompressed archive.
|
||||
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
|
||||
p := pools.BufioReader32KPool
|
||||
buf := p.Get(archive)
|
||||
|
@ -126,17 +149,21 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
|
|||
readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
|
||||
return readBufWrapper, nil
|
||||
case Xz:
|
||||
xzReader, err := xzDecompress(buf)
|
||||
xzReader, chdone, err := xzDecompress(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
|
||||
return readBufWrapper, nil
|
||||
return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
|
||||
<-chdone
|
||||
return readBufWrapper.Close()
|
||||
}), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
|
||||
}
|
||||
}
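A hedged round-trip sketch using the two stream helpers above; note that for xz input the ReadCloser returned by DecompressStream now also waits on the chdone channel, so the external xz process is reaped before Close returns. The import path is assumed as before:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	// Assumed vendored import path for the archive package shown in this diff.
	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

// nopWriteCloser adapts a bytes.Buffer to the io.WriteCloser CompressStream expects.
type nopWriteCloser struct{ *bytes.Buffer }

func (nopWriteCloser) Close() error { return nil }

func main() {
	// Compress a payload with gzip...
	var buf bytes.Buffer
	w, err := archive.CompressStream(nopWriteCloser{&buf}, archive.Gzip)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello, telegraf"))
	w.Close()

	// ...then decompress it again; the compression type is detected from the header.
	r, err := archive.DecompressStream(&buf)
	if err != nil {
		panic(err)
	}
	defer r.Close()
	out, _ := ioutil.ReadAll(r)
	fmt.Println(string(out)) // hello, telegraf
}
```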
|
||||
|
||||
// CompressStream compresses the dest with specified compression algorithm.
|
||||
func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
|
||||
p := pools.BufioWriter32KPool
|
||||
buf := p.Get(dest)
|
||||
|
@ -157,6 +184,7 @@ func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteClose
|
|||
}
|
||||
}
|
||||
|
||||
// Extension returns the extension of a file that uses the specified compression algorithm.
|
||||
func (compression *Compression) Extension() string {
|
||||
switch *compression {
|
||||
case Uncompressed:
|
||||
|
@ -177,6 +205,8 @@ type tarAppender struct {
|
|||
|
||||
// for hardlink mapping
|
||||
SeenFiles map[uint64]string
|
||||
UIDMaps []idtools.IDMap
|
||||
GIDMaps []idtools.IDMap
|
||||
}
|
||||
|
||||
// canonicalTarName provides a platform-independent and consistent posix-style
|
||||
|
@ -219,14 +249,14 @@ func (ta *tarAppender) addTarFile(path, name string) error {
|
|||
}
|
||||
hdr.Name = name
|
||||
|
||||
nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
|
||||
inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if it's a regular file and has more than 1 link,
|
||||
// if it's not a directory and has more than 1 link,
|
||||
// it's hardlinked, so set the type flag accordingly
|
||||
if fi.Mode().IsRegular() && nlink > 1 {
|
||||
if !fi.IsDir() && hasHardlinks(fi) {
|
||||
// a link should have a name that it links too
|
||||
// and that linked name should be first in the tar archive
|
||||
if oldpath, ok := ta.SeenFiles[inode]; ok {
|
||||
|
@ -244,6 +274,25 @@ func (ta *tarAppender) addTarFile(path, name string) error {
|
|||
hdr.Xattrs["security.capability"] = string(capability)
|
||||
}
|
||||
|
||||
//handle re-mapping container ID mappings back to host ID mappings before
|
||||
//writing tar headers/files
|
||||
if ta.UIDMaps != nil || ta.GIDMaps != nil {
|
||||
uid, gid, err := getFileUIDGID(fi.Sys())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Uid = xUID
|
||||
hdr.Gid = xGID
|
||||
}
|
||||
|
||||
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -358,19 +407,19 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
|||
return err
|
||||
}
|
||||
|
||||
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
|
||||
// syscall.UtimesNano doesn't support a NOFOLLOW flag atm
|
||||
// system.Chtimes doesn't support a NOFOLLOW flag atm
|
||||
if hdr.Typeflag == tar.TypeLink {
|
||||
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
|
||||
if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
|
||||
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else if hdr.Typeflag != tar.TypeSymlink {
|
||||
if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
|
||||
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
|
||||
if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
|
||||
return err
|
||||
}
|
||||
|
@ -388,6 +437,10 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) {
|
|||
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
|
||||
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
|
||||
|
||||
// Fix the source path to work with long path names. This is a no-op
|
||||
// on platforms other than Windows.
|
||||
srcPath = fixVolumePathPrefix(srcPath)
|
||||
|
||||
patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)
|
||||
|
||||
if err != nil {
|
||||
|
@ -406,6 +459,8 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
|||
TarWriter: tar.NewWriter(compressWriter),
|
||||
Buffer: pools.BufioWriter32KPool.Get(nil),
|
||||
SeenFiles: make(map[uint64]string),
|
||||
UIDMaps: options.UIDMaps,
|
||||
GIDMaps: options.GIDMaps,
|
||||
}
|
||||
|
||||
defer func() {
|
||||
|
@ -454,11 +509,10 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
|||
|
||||
seen := make(map[string]bool)
|
||||
|
||||
var renamedRelFilePath string // For when tar.Options.Name is set
|
||||
for _, include := range options.IncludeFiles {
|
||||
// We can't use filepath.Join(srcPath, include) because this will
|
||||
// clean away a trailing "." or "/" which may be important.
|
||||
walkRoot := strings.Join([]string{srcPath, include}, string(filepath.Separator))
|
||||
rebaseName := options.RebaseNames[include]
|
||||
|
||||
walkRoot := getWalkRoot(srcPath, include)
|
||||
filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
|
||||
|
@ -503,14 +557,17 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
|||
}
|
||||
seen[relFilePath] = true
|
||||
|
||||
// TODO Windows: Verify if this needs to be os.Pathseparator
|
||||
// Rename the base resource
|
||||
if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
|
||||
renamedRelFilePath = relFilePath
|
||||
}
|
||||
// Set this to make sure the items underneath also get renamed
|
||||
if options.Name != "" {
|
||||
relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1)
|
||||
// Rename the base resource.
|
||||
if rebaseName != "" {
|
||||
var replacement string
|
||||
if rebaseName != string(filepath.Separator) {
|
||||
// Special case the root directory to replace with an
|
||||
// empty string instead so that we don't end up with
|
||||
// double slashes in the paths.
|
||||
replacement = rebaseName
|
||||
}
|
||||
|
||||
relFilePath = strings.Replace(relFilePath, include, replacement, 1)
|
||||
}
|
||||
|
||||
if err := ta.addTarFile(filePath, relFilePath); err != nil {
|
||||
|
@ -524,12 +581,17 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
|||
return pipeReader, nil
|
||||
}
|
||||
|
||||
// Unpack unpacks the decompressedArchive to dest with options.
|
||||
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
|
||||
tr := tar.NewReader(decompressedArchive)
|
||||
trBuf := pools.BufioReader32KPool.Get(nil)
|
||||
defer pools.BufioReader32KPool.Put(trBuf)
|
||||
|
||||
var dirs []*tar.Header
|
||||
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Iterate through the files in the archive.
|
||||
loop:
|
||||
|
@ -607,6 +669,28 @@ loop:
|
|||
}
|
||||
trBuf.Reset(tr)
|
||||
|
||||
// if the options contain a uid & gid maps, convert header uid/gid
|
||||
// entries using the maps such that lchown sets the proper mapped
|
||||
// uid/gid after writing the file. We only perform this mapping if
|
||||
// the file isn't already owned by the remapped root UID or GID, as
|
||||
// that specific uid/gid has no mapping from container -> host, and
|
||||
// those files already have the proper ownership for inside the
|
||||
// container.
|
||||
if hdr.Uid != remappedRootUID {
|
||||
xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Uid = xUID
|
||||
}
|
||||
if hdr.Gid != remappedRootGID {
|
||||
xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Gid = xGID
|
||||
}
|
||||
|
||||
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -620,8 +704,8 @@ loop:
|
|||
|
||||
for _, hdr := range dirs {
|
||||
path := filepath.Join(dest, hdr.Name)
|
||||
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
|
||||
if err := syscall.UtimesNano(path, ts); err != nil {
|
||||
|
||||
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -637,7 +721,7 @@ func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
|
|||
return untarHandler(tarArchive, dest, options, true)
|
||||
}
|
||||
|
||||
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
|
||||
// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
|
||||
// and unpacks it into the directory at `dest`.
|
||||
// The archive must be an uncompressed stream.
|
||||
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
|
||||
|
@ -657,7 +741,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
|
|||
options.ExcludePatterns = []string{}
|
||||
}
|
||||
|
||||
var r io.Reader = tarArchive
|
||||
r := tarArchive
|
||||
if decompress {
|
||||
decompressedArchive, err := DecompressStream(tarArchive)
|
||||
if err != nil {
|
||||
|
@ -670,6 +754,8 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
|
|||
return Unpack(r, dest, options)
|
||||
}
|
||||
|
||||
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
|
||||
// If either Tar or Untar fails, TarUntar aborts and returns the error.
|
||||
func (archiver *Archiver) TarUntar(src, dst string) error {
|
||||
logrus.Debugf("TarUntar(%s %s)", src, dst)
|
||||
archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
|
||||
|
@ -677,7 +763,15 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
|
|||
return err
|
||||
}
|
||||
defer archive.Close()
|
||||
return archiver.Untar(archive, dst, nil)
|
||||
|
||||
var options *TarOptions
|
||||
if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
|
||||
options = &TarOptions{
|
||||
UIDMaps: archiver.UIDMaps,
|
||||
GIDMaps: archiver.GIDMaps,
|
||||
}
|
||||
}
|
||||
return archiver.Untar(archive, dst, options)
|
||||
}
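A usage sketch tying together TarWithOptions and Untar from this file; the directory paths are placeholders, the options are limited to fields visible in the TarOptions struct above, and the import path is assumed as in the earlier sketches:

```go
package main

import (
	"log"

	// Assumed vendored import path for the archive package shown in this diff.
	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	// Tar up /tmp/src (a placeholder path), skipping anything matching *.log.
	rc, err := archive.TarWithOptions("/tmp/src", &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{"*.log"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// Untar decompresses as needed and unpacks into the destination directory.
	if err := archive.Untar(rc, "/tmp/dst", nil); err != nil {
		log.Fatal(err)
	}
}
```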
|
||||
|
||||
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
|
||||
|
@ -686,13 +780,21 @@ func TarUntar(src, dst string) error {
|
|||
return defaultArchiver.TarUntar(src, dst)
|
||||
}
|
||||
|
||||
// UntarPath untar a file from path to a destination, src is the source tar file path.
|
||||
func (archiver *Archiver) UntarPath(src, dst string) error {
|
||||
archive, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer archive.Close()
|
||||
if err := archiver.Untar(archive, dst, nil); err != nil {
|
||||
var options *TarOptions
|
||||
if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
|
||||
options = &TarOptions{
|
||||
UIDMaps: archiver.UIDMaps,
|
||||
GIDMaps: archiver.GIDMaps,
|
||||
}
|
||||
}
|
||||
if err := archiver.Untar(archive, dst, options); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
@ -704,6 +806,10 @@ func UntarPath(src, dst string) error {
|
|||
return defaultArchiver.UntarPath(src, dst)
|
||||
}
|
||||
|
||||
// CopyWithTar creates a tar archive of filesystem path `src`, and
|
||||
// unpacks it at filesystem path `dst`.
|
||||
// The archive is streamed directly with fixed buffering and no
|
||||
// intermediary disk IO.
|
||||
func (archiver *Archiver) CopyWithTar(src, dst string) error {
|
||||
srcSt, err := os.Stat(src)
|
||||
if err != nil {
|
||||
|
@ -714,7 +820,7 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
|
|||
}
|
||||
// Create dst, copy src's content into it
|
||||
logrus.Debugf("Creating dest directory: %s", dst)
|
||||
if err := system.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
|
||||
if err := system.MkdirAll(dst, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
|
||||
|
@ -729,6 +835,9 @@ func CopyWithTar(src, dst string) error {
|
|||
return defaultArchiver.CopyWithTar(src, dst)
|
||||
}
|
||||
|
||||
// CopyFileWithTar emulates the behavior of the 'cp' command-line
|
||||
// for a single file. It copies a regular file from path `src` to
|
||||
// path `dst`, and preserves all its metadata.
|
||||
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
|
||||
logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
|
||||
srcSt, err := os.Stat(src)
|
||||
|
@ -746,7 +855,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
|
|||
dst = filepath.Join(dst, filepath.Base(src))
|
||||
}
|
||||
// Create the holding directory if necessary
|
||||
if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
|
||||
if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -767,6 +876,28 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
|
|||
hdr.Name = filepath.Base(dst)
|
||||
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
|
||||
|
||||
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// only perform mapping if the file being copied isn't already owned by the
|
||||
// uid or gid of the remapped root in the container
|
||||
if remappedRootUID != hdr.Uid {
|
||||
xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Uid = xUID
|
||||
}
|
||||
if remappedRootGID != hdr.Gid {
|
||||
xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Gid = xGID
|
||||
}
|
||||
|
||||
tw := tar.NewWriter(w)
|
||||
defer tw.Close()
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
|
@ -782,7 +913,12 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
|
|||
err = er
|
||||
}
|
||||
}()
|
||||
return archiver.Untar(r, filepath.Dir(dst), nil)
|
||||
|
||||
err = archiver.Untar(r, filepath.Dir(dst), nil)
|
||||
if err != nil {
|
||||
r.CloseWithError(err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// CopyFileWithTar emulates the behavior of the 'cp' command-line
|
||||
|
@ -797,57 +933,33 @@ func CopyFileWithTar(src, dst string) (err error) {
|
|||
return defaultArchiver.CopyFileWithTar(src, dst)
|
||||
}
|
||||
|
||||
// CmdStream executes a command, and returns its stdout as a stream.
|
||||
// cmdStream executes a command, and returns its stdout as a stream.
|
||||
// If the command fails to run or doesn't complete successfully, an error
|
||||
// will be returned, including anything written on stderr.
|
||||
func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
|
||||
if input != nil {
|
||||
stdin, err := cmd.StdinPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Write stdin if any
|
||||
go func() {
|
||||
io.Copy(stdin, input)
|
||||
stdin.Close()
|
||||
}()
|
||||
}
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
|
||||
chdone := make(chan struct{})
|
||||
cmd.Stdin = input
|
||||
pipeR, pipeW := io.Pipe()
|
||||
errChan := make(chan []byte)
|
||||
// Collect stderr, we will use it in case of an error
|
||||
go func() {
|
||||
errText, e := ioutil.ReadAll(stderr)
|
||||
if e != nil {
|
||||
errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
|
||||
}
|
||||
errChan <- errText
|
||||
}()
|
||||
cmd.Stdout = pipeW
|
||||
var errBuf bytes.Buffer
|
||||
cmd.Stderr = &errBuf
|
||||
|
||||
// Run the command and return the pipe
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Copy stdout to the returned pipe
|
||||
go func() {
|
||||
_, err := io.Copy(pipeW, stdout)
|
||||
if err != nil {
|
||||
pipeW.CloseWithError(err)
|
||||
}
|
||||
errText := <-errChan
|
||||
if err := cmd.Wait(); err != nil {
|
||||
pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
|
||||
pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
|
||||
} else {
|
||||
pipeW.Close()
|
||||
}
|
||||
close(chdone)
|
||||
}()
|
||||
// Run the command and return the pipe
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return pipeR, nil
|
||||
|
||||
return pipeR, chdone, nil
|
||||
}
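The rewritten cmdStream above exposes the command's stdout as a pipe plus a channel that is closed once the process has been waited on; callers such as DecompressStream drain the pipe first and then block on the channel so the child is fully reaped before Close returns. A minimal sketch of the same pattern with a stand-in command ("cat" is assumed to be available):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os/exec"
)

// runStream mirrors the cmdStream pattern above: stdout is exposed through an
// io.Pipe and the returned channel is closed only after cmd.Wait has finished.
func runStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
	done := make(chan struct{})
	cmd.Stdin = input
	pipeR, pipeW := io.Pipe()
	cmd.Stdout = pipeW
	var errBuf bytes.Buffer
	cmd.Stderr = &errBuf

	if err := cmd.Start(); err != nil {
		return nil, nil, err
	}
	go func() {
		if err := cmd.Wait(); err != nil {
			pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
		} else {
			pipeW.Close()
		}
		close(done)
	}()
	return pipeR, done, nil
}

func main() {
	out, done, err := runStream(exec.Command("cat"), bytes.NewBufferString("hello"))
	if err != nil {
		panic(err)
	}
	data, _ := ioutil.ReadAll(out) // drain stdout first...
	<-done                         // ...then wait for the process to be reaped
	fmt.Println(string(data))
}
```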
|
||||
|
||||
// NewTempArchive reads the content of src into a temporary file, and returns the contents
|
||||
|
@ -872,6 +984,8 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
|
|||
return &TempArchive{File: f, Size: size}, nil
|
||||
}
|
||||
|
||||
// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
|
||||
// the file will be deleted.
|
||||
type TempArchive struct {
|
||||
*os.File
|
||||
Size int64 // Pre-computed from Stat().Size() as a convenience
|
||||
|
|
File diff suppressed because it is too large
|
@ -6,11 +6,26 @@ import (
|
|||
"archive/tar"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
// fixVolumePathPrefix does platform specific processing to ensure that if
|
||||
// the path being passed in is not in a volume path format, convert it to one.
|
||||
func fixVolumePathPrefix(srcPath string) string {
|
||||
return srcPath
|
||||
}
|
||||
|
||||
// getWalkRoot calculates the root path when performing a TarWithOptions.
|
||||
// We use a seperate function as this is platform specific. On Linux, we
|
||||
// can't use filepath.Join(srcPath,include) because this will clean away
|
||||
// a trailing "." or "/" which may be important.
|
||||
func getWalkRoot(srcPath string, include string) string {
|
||||
return srcPath + string(filepath.Separator) + include
|
||||
}
|
||||
|
||||
// CanonicalTarNameForPath returns platform-specific filepath
|
||||
// to canonical posix-style path for tar archival. p is relative
|
||||
// path.
|
||||
|
@ -25,7 +40,7 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
|
|||
return perm // noop for unix as golang APIs provide perm bits correctly
|
||||
}
|
||||
|
||||
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
|
||||
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
|
||||
s, ok := stat.(*syscall.Stat_t)
|
||||
|
||||
if !ok {
|
||||
|
@ -33,10 +48,9 @@ func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, st
|
|||
return
|
||||
}
|
||||
|
||||
nlink = uint32(s.Nlink)
|
||||
inode = uint64(s.Ino)
|
||||
|
||||
// Currently go does not fil in the major/minors
|
||||
// Currently go does not fill in the major/minors
|
||||
if s.Mode&syscall.S_IFBLK != 0 ||
|
||||
s.Mode&syscall.S_IFCHR != 0 {
|
||||
hdr.Devmajor = int64(major(uint64(s.Rdev)))
|
||||
|
@ -46,6 +60,15 @@ func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, st
|
|||
return
|
||||
}
|
||||
|
||||
func getFileUIDGID(stat interface{}) (int, int, error) {
|
||||
s, ok := stat.(*syscall.Stat_t)
|
||||
|
||||
if !ok {
|
||||
return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
|
||||
}
|
||||
return int(s.Uid), int(s.Gid), nil
|
||||
}
|
||||
|
||||
func major(device uint64) uint64 {
|
||||
return (device >> 8) & 0xfff
|
||||
}
|
||||
|
|
|
@ -1,60 +0,0 @@
|
|||
// +build !windows
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCanonicalTarNameForPath(t *testing.T) {
|
||||
cases := []struct{ in, expected string }{
|
||||
{"foo", "foo"},
|
||||
{"foo/bar", "foo/bar"},
|
||||
{"foo/dir/", "foo/dir/"},
|
||||
}
|
||||
for _, v := range cases {
|
||||
if out, err := CanonicalTarNameForPath(v.in); err != nil {
|
||||
t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
|
||||
} else if out != v.expected {
|
||||
t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCanonicalTarName(t *testing.T) {
|
||||
cases := []struct {
|
||||
in string
|
||||
isDir bool
|
||||
expected string
|
||||
}{
|
||||
{"foo", false, "foo"},
|
||||
{"foo", true, "foo/"},
|
||||
{"foo/bar", false, "foo/bar"},
|
||||
{"foo/bar", true, "foo/bar/"},
|
||||
}
|
||||
for _, v := range cases {
|
||||
if out, err := canonicalTarName(v.in, v.isDir); err != nil {
|
||||
t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
|
||||
} else if out != v.expected {
|
||||
t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestChmodTarEntry(t *testing.T) {
|
||||
cases := []struct {
|
||||
in, expected os.FileMode
|
||||
}{
|
||||
{0000, 0000},
|
||||
{0777, 0777},
|
||||
{0644, 0644},
|
||||
{0755, 0755},
|
||||
{0444, 0444},
|
||||
}
|
||||
for _, v := range cases {
|
||||
if out := chmodTarEntry(v.in); out != v.expected {
|
||||
t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -6,10 +6,25 @@ import (
|
|||
"archive/tar"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
|
||||
)
|
||||
|
||||
// canonicalTarNameForPath returns platform-specific filepath
|
||||
// fixVolumePathPrefix does platform specific processing to ensure that if
|
||||
// the path being passed in is not in a volume path format, convert it to one.
|
||||
func fixVolumePathPrefix(srcPath string) string {
|
||||
return longpath.AddPrefix(srcPath)
|
||||
}
|
||||
|
||||
// getWalkRoot calculates the root path when performing a TarWithOptions.
|
||||
// We use a seperate function as this is platform specific.
|
||||
func getWalkRoot(srcPath string, include string) string {
|
||||
return filepath.Join(srcPath, include)
|
||||
}
|
||||
|
||||
// CanonicalTarNameForPath returns platform-specific filepath
|
||||
// to canonical posix-style path for tar archival. p is relative
|
||||
// path.
|
||||
func CanonicalTarNameForPath(p string) (string, error) {
|
||||
|
@@ -34,7 +49,7 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
|
|||
return perm
|
||||
}
|
||||
|
||||
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
|
||||
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
|
||||
// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
|
||||
return
|
||||
}
|
||||
|
@@ -48,3 +63,8 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
|
|||
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func getFileUIDGID(stat interface{}) (int, int, error) {
|
||||
// no notion of file ownership mapping yet on Windows
|
||||
return 0, 0, nil
|
||||
}
|
||||
|
|
|
@@ -1,65 +0,0 @@
|
|||
// +build windows
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCanonicalTarNameForPath(t *testing.T) {
|
||||
cases := []struct {
|
||||
in, expected string
|
||||
shouldFail bool
|
||||
}{
|
||||
{"foo", "foo", false},
|
||||
{"foo/bar", "___", true}, // unix-styled windows path must fail
|
||||
{`foo\bar`, "foo/bar", false},
|
||||
}
|
||||
for _, v := range cases {
|
||||
if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail {
|
||||
t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
|
||||
} else if v.shouldFail && err == nil {
|
||||
t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out)
|
||||
} else if !v.shouldFail && out != v.expected {
|
||||
t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCanonicalTarName(t *testing.T) {
|
||||
cases := []struct {
|
||||
in string
|
||||
isDir bool
|
||||
expected string
|
||||
}{
|
||||
{"foo", false, "foo"},
|
||||
{"foo", true, "foo/"},
|
||||
{`foo\bar`, false, "foo/bar"},
|
||||
{`foo\bar`, true, "foo/bar/"},
|
||||
}
|
||||
for _, v := range cases {
|
||||
if out, err := canonicalTarName(v.in, v.isDir); err != nil {
|
||||
t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
|
||||
} else if out != v.expected {
|
||||
t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestChmodTarEntry(t *testing.T) {
|
||||
cases := []struct {
|
||||
in, expected os.FileMode
|
||||
}{
|
||||
{0000, 0111},
|
||||
{0777, 0755},
|
||||
{0644, 0755},
|
||||
{0755, 0755},
|
||||
{0444, 0555},
|
||||
}
|
||||
for _, v := range cases {
|
||||
if out := chmodTarEntry(v.in); out != v.expected {
|
||||
t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out)
|
||||
}
|
||||
}
|
||||
}
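The deleted Windows test above pins down the expected canonicalisation: backslash-separated paths become forward-slash tar names, and a path that already contains a forward slash is rejected. A small sketch of that behaviour, reconstructed from the test expectations rather than copied from the vendored implementation:

package main

import (
	"fmt"
	"strings"
)

// canonicalTarNameForPath mirrors what the deleted Windows test above checks:
// a Windows-style path is rewritten with forward slashes for the tar archive,
// and a path already containing "/" is rejected. This is a sketch of the
// behaviour under test, not the vendored code itself.
func canonicalTarNameForPath(p string) (string, error) {
	if strings.Contains(p, "/") {
		return "", fmt.Errorf("windows path contains forward slash: %s", p)
	}
	return strings.Replace(p, `\`, "/", -1), nil
}

func main() {
	for _, in := range []string{"foo", `foo\bar`, "foo/bar"} {
		out, err := canonicalTarNameForPath(in)
		fmt.Printf("%-10q -> %q err=%v\n", in, out, err)
	}
}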
|
|
@@ -14,18 +14,27 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
// ChangeType represents the change type.
|
||||
type ChangeType int
|
||||
|
||||
const (
|
||||
// ChangeModify represents the modify operation.
|
||||
ChangeModify = iota
|
||||
// ChangeAdd represents the add operation.
|
||||
ChangeAdd
|
||||
// ChangeDelete represents the delete operation.
|
||||
ChangeDelete
|
||||
)
|
||||
|
||||
// Change represents a change, it wraps the change type and path.
|
||||
// It describes changes of the files in the path respect to the
|
||||
// parent layers. The change could be modify, add, delete.
|
||||
// This is used for layer diff.
|
||||
type Change struct {
|
||||
Path string
|
||||
Kind ChangeType
|
||||
|
@@ -94,7 +103,7 @@ func Changes(layers []string, rw string) ([]Change, error) {
|
|||
}
|
||||
|
||||
// Skip AUFS metadata
|
||||
if matched, err := filepath.Match(string(os.PathSeparator)+".wh..wh.*", path); err != nil || matched {
|
||||
if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@@ -105,8 +114,8 @@ func Changes(layers []string, rw string) ([]Change, error) {
|
|||
// Find out what kind of modification happened
|
||||
file := filepath.Base(path)
|
||||
// If there is a whiteout, then the file was removed
|
||||
if strings.HasPrefix(file, ".wh.") {
|
||||
originalFile := file[len(".wh."):]
|
||||
if strings.HasPrefix(file, WhiteoutPrefix) {
|
||||
originalFile := file[len(WhiteoutPrefix):]
|
||||
change.Path = filepath.Join(filepath.Dir(path), originalFile)
|
||||
change.Kind = ChangeDelete
|
||||
} else {
|
||||
|
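The hunks above replace the hard-coded ".wh." strings with the WhiteoutPrefix and WhiteoutMetaPrefix constants when walking a layer. A small sketch of the classification they perform, assuming the conventional AUFS values for those constants:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// Conventional AUFS whiteout markers; assumed values for illustration.
const (
	whiteoutPrefix     = ".wh."
	whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix // ".wh..wh."
)

// classify reports whether a layer entry is AUFS metadata to skip, a deletion
// whiteout (returning the original path), or an ordinary add/modify.
func classify(path string) (kind, original string) {
	base := filepath.Base(path)
	switch {
	case strings.HasPrefix(base, whiteoutMetaPrefix):
		return "metadata", ""
	case strings.HasPrefix(base, whiteoutPrefix):
		return "delete", filepath.Join(filepath.Dir(path), base[len(whiteoutPrefix):])
	default:
		return "add/modify", path
	}
}

func main() {
	for _, p := range []string{"/dir1/.wh..wh.aufs", "/dir1/.wh.file1-2", "/dir1/file1-1"} {
		kind, orig := classify(p)
		fmt.Printf("%-22s %-12s %s\n", p, kind, orig)
	}
}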
@@ -161,20 +170,22 @@ func Changes(layers []string, rw string) ([]Change, error) {
|
|||
return changes, nil
|
||||
}
|
||||
|
||||
// FileInfo describes the information of a file.
|
||||
type FileInfo struct {
|
||||
parent *FileInfo
|
||||
name string
|
||||
stat *system.Stat_t
|
||||
stat *system.StatT
|
||||
children map[string]*FileInfo
|
||||
capability []byte
|
||||
added bool
|
||||
}
|
||||
|
||||
func (root *FileInfo) LookUp(path string) *FileInfo {
|
||||
// LookUp looks up the file information of a file.
|
||||
func (info *FileInfo) LookUp(path string) *FileInfo {
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
parent := root
|
||||
parent := info
|
||||
if path == string(os.PathSeparator) {
|
||||
return root
|
||||
return info
|
||||
}
|
||||
|
||||
pathElements := strings.Split(path, string(os.PathSeparator))
|
||||
|
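The FileInfo type above keeps a parent pointer and a children map so that LookUp can resolve a path one element at a time. A stripped-down sketch of that tree walk (fixed "/" separator, no stat or capability fields):

package main

import (
	"fmt"
	"strings"
)

// fileInfo is a reduced version of the FileInfo node in the hunk above:
// just a name, a parent pointer and a children map.
type fileInfo struct {
	name     string
	parent   *fileInfo
	children map[string]*fileInfo
}

func newNode(name string, parent *fileInfo) *fileInfo {
	n := &fileInfo{name: name, parent: parent, children: map[string]*fileInfo{}}
	if parent != nil {
		parent.children[name] = n
	}
	return n
}

// lookUp walks the path elements from the root, returning nil if any element
// is missing, mirroring the shape of FileInfo.LookUp above.
func (root *fileInfo) lookUp(path string) *fileInfo {
	if path == "/" {
		return root
	}
	cur := root
	for _, elem := range strings.Split(path, "/") {
		if elem == "" {
			continue
		}
		next, ok := cur.children[elem]
		if !ok {
			return nil
		}
		cur = next
	}
	return cur
}

func main() {
	root := newNode("/", nil)
	dir1 := newNode("dir1", root)
	newNode("file1-1", dir1)

	fmt.Println(root.lookUp("/dir1/file1-1") != nil) // true
	fmt.Println(root.lookUp("/dir1/missing") != nil) // false
}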
@@ -275,6 +286,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
|
|||
|
||||
}
|
||||
|
||||
// Changes add changes to file information.
|
||||
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
|
||||
var changes []Change
|
||||
|
||||
|
@@ -316,13 +328,29 @@ func ChangesDirs(newDir, oldDir string) ([]Change, error) {
|
|||
|
||||
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
|
||||
func ChangesSize(newDir string, changes []Change) int64 {
|
||||
var size int64
|
||||
var (
|
||||
size int64
|
||||
sf = make(map[uint64]struct{})
|
||||
)
|
||||
for _, change := range changes {
|
||||
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
|
||||
file := filepath.Join(newDir, change.Path)
|
||||
fileInfo, _ := os.Lstat(file)
|
||||
fileInfo, err := os.Lstat(file)
|
||||
if err != nil {
|
||||
logrus.Errorf("Can not stat %q: %s", file, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if fileInfo != nil && !fileInfo.IsDir() {
|
||||
size += fileInfo.Size()
|
||||
if hasHardlinks(fileInfo) {
|
||||
inode := getIno(fileInfo)
|
||||
if _, ok := sf[inode]; !ok {
|
||||
size += fileInfo.Size()
|
||||
sf[inode] = struct{}{}
|
||||
}
|
||||
} else {
|
||||
size += fileInfo.Size()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
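The rewritten ChangesSize above now logs Lstat failures and, via an inode set, counts hard-linked files only once. A sketch of that accounting with the platform-specific inode lookup abstracted away (the getIno/hasHardlinks helpers appear later in this diff):

package main

import "fmt"

// entry stands in for a changed file: its on-disk size, its inode and whether
// it has more than one hard link.
type entry struct {
	size     int64
	inode    uint64
	hardlink bool
}

// changesSize mimics the accounting in the hunk above: hard-linked entries
// sharing an inode contribute their size once, everything else is summed.
func changesSize(entries []entry) int64 {
	var (
		size int64
		seen = make(map[uint64]struct{})
	)
	for _, e := range entries {
		if e.hardlink {
			if _, ok := seen[e.inode]; ok {
				continue
			}
			seen[e.inode] = struct{}{}
		}
		size += e.size
	}
	return size
}

func main() {
	entries := []entry{
		{size: 10, inode: 1, hardlink: true},
		{size: 10, inode: 1, hardlink: true}, // same inode, counted once
		{size: 5, inode: 2},
	}
	fmt.Println(changesSize(entries)) // 15
}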
@@ -330,13 +358,15 @@ func ChangesSize(newDir string, changes []Change) int64 {
|
|||
}
|
||||
|
||||
// ExportChanges produces an Archive from the provided changes, relative to dir.
|
||||
func ExportChanges(dir string, changes []Change) (Archive, error) {
|
||||
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
|
||||
reader, writer := io.Pipe()
|
||||
go func() {
|
||||
ta := &tarAppender{
|
||||
TarWriter: tar.NewWriter(writer),
|
||||
Buffer: pools.BufioWriter32KPool.Get(nil),
|
||||
SeenFiles: make(map[uint64]string),
|
||||
UIDMaps: uidMaps,
|
||||
GIDMaps: gidMaps,
|
||||
}
|
||||
// this buffer is needed for the duration of this piped stream
|
||||
defer pools.BufioWriter32KPool.Put(ta.Buffer)
|
||||
|
@@ -351,7 +381,7 @@ func ExportChanges(dir string, changes []Change) (Archive, error) {
|
|||
if change.Kind == ChangeDelete {
|
||||
whiteOutDir := filepath.Dir(change.Path)
|
||||
whiteOutBase := filepath.Base(change.Path)
|
||||
whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
|
||||
whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
|
||||
timestamp := time.Now()
|
||||
hdr := &tar.Header{
|
||||
Name: whiteOut[1:],
|
||||
|
|
|
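For a ChangeDelete, ExportChanges above emits an empty whiteout entry named ".wh.&lt;basename&gt;" instead of file contents. A minimal sketch of writing such a header with archive/tar; setting Typeflag explicitly and leaving the other fields at their zero values is a simplification of what the vendored code does:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"path/filepath"
	"time"
)

// writeWhiteout appends a zero-length ".wh.<base>" entry for a deleted path,
// following the naming used in the hunk above.
func writeWhiteout(tw *tar.Writer, deletedPath string) error {
	dir, base := filepath.Dir(deletedPath), filepath.Base(deletedPath)
	whiteOut := filepath.Join(dir, ".wh."+base)
	hdr := &tar.Header{
		Typeflag: tar.TypeReg, // whiteouts are empty regular files
		Name:     whiteOut[1:], // strip the leading separator, as above
		Size:     0,
		ModTime:  time.Now(),
	}
	return tw.WriteHeader(hdr)
}

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := writeWhiteout(tw, "/dir1/file1-2"); err != nil {
		panic(err)
	}
	tw.Close()

	tr := tar.NewReader(&buf)
	hdr, _ := tr.Next()
	fmt.Println(hdr.Name) // dir1/.wh.file1-2 on a Unix path separator
}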
@@ -1,127 +0,0 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestHardLinkOrder(t *testing.T) {
|
||||
names := []string{"file1.txt", "file2.txt", "file3.txt"}
|
||||
msg := []byte("Hey y'all")
|
||||
|
||||
// Create dir
|
||||
src, err := ioutil.TempDir("", "docker-hardlink-test-src-")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
//defer os.RemoveAll(src)
|
||||
for _, name := range names {
|
||||
func() {
|
||||
fh, err := os.Create(path.Join(src, name))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer fh.Close()
|
||||
if _, err = fh.Write(msg); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
// Create dest, with changes that includes hardlinks
|
||||
dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
os.RemoveAll(dest) // we just want the name, at first
|
||||
if err := copyDir(src, dest); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(dest)
|
||||
for _, name := range names {
|
||||
for i := 0; i < 5; i++ {
|
||||
if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// get changes
|
||||
changes, err := ChangesDirs(dest, src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// sort
|
||||
sort.Sort(changesByPath(changes))
|
||||
|
||||
// ExportChanges
|
||||
ar, err := ExportChanges(dest, changes)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdrs, err := walkHeaders(ar)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// reverse sort
|
||||
sort.Sort(sort.Reverse(changesByPath(changes)))
|
||||
// ExportChanges
|
||||
arRev, err := ExportChanges(dest, changes)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdrsRev, err := walkHeaders(arRev)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// line up the two sets
|
||||
sort.Sort(tarHeaders(hdrs))
|
||||
sort.Sort(tarHeaders(hdrsRev))
|
||||
|
||||
// compare Size and LinkName
|
||||
for i := range hdrs {
|
||||
if hdrs[i].Name != hdrsRev[i].Name {
|
||||
t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name)
|
||||
}
|
||||
if hdrs[i].Size != hdrsRev[i].Size {
|
||||
t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size)
|
||||
}
|
||||
if hdrs[i].Typeflag != hdrsRev[i].Typeflag {
|
||||
t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag)
|
||||
}
|
||||
if hdrs[i].Linkname != hdrsRev[i].Linkname {
|
||||
t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
type tarHeaders []tar.Header
|
||||
|
||||
func (th tarHeaders) Len() int { return len(th) }
|
||||
func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] }
|
||||
func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name }
|
||||
|
||||
func walkHeaders(r io.Reader) ([]tar.Header, error) {
|
||||
t := tar.NewReader(r)
|
||||
headers := []tar.Header{}
|
||||
for {
|
||||
hdr, err := t.Next()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return headers, err
|
||||
}
|
||||
headers = append(headers, *hdr)
|
||||
}
|
||||
return headers, nil
|
||||
}
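The deleted test above verifies that hard links are exported in a stable order by draining every header from the archive stream. A standalone sketch of that header walk over an in-memory archive (error handling in the writer loop is elided):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

// walkHeaders drains a tar stream and returns every header it contains,
// in the same spirit as the helper in the deleted test above.
func walkHeaders(r io.Reader) ([]tar.Header, error) {
	tr := tar.NewReader(r)
	var headers []tar.Header
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return headers, nil
		}
		if err != nil {
			return headers, err
		}
		headers = append(headers, *hdr)
	}
}

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	for _, name := range []string{"file1.txt", "file2.txt"} {
		body := []byte("Hey y'all")
		tw.WriteHeader(&tar.Header{Typeflag: tar.TypeReg, Name: name, Mode: 0600, Size: int64(len(body))})
		tw.Write(body)
	}
	tw.Close()

	hdrs, err := walkHeaders(&buf)
	if err != nil {
		panic(err)
	}
	for _, h := range hdrs {
		fmt.Println(h.Name, h.Size)
	}
}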
|
|
@@ -1,495 +0,0 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func max(x, y int) int {
|
||||
if x >= y {
|
||||
return x
|
||||
}
|
||||
return y
|
||||
}
|
||||
|
||||
func copyDir(src, dst string) error {
|
||||
cmd := exec.Command("cp", "-a", src, dst)
|
||||
if err := cmd.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type FileType uint32
|
||||
|
||||
const (
|
||||
Regular FileType = iota
|
||||
Dir
|
||||
Symlink
|
||||
)
|
||||
|
||||
type FileData struct {
|
||||
filetype FileType
|
||||
path string
|
||||
contents string
|
||||
permissions os.FileMode
|
||||
}
|
||||
|
||||
func createSampleDir(t *testing.T, root string) {
|
||||
files := []FileData{
|
||||
{Regular, "file1", "file1\n", 0600},
|
||||
{Regular, "file2", "file2\n", 0666},
|
||||
{Regular, "file3", "file3\n", 0404},
|
||||
{Regular, "file4", "file4\n", 0600},
|
||||
{Regular, "file5", "file5\n", 0600},
|
||||
{Regular, "file6", "file6\n", 0600},
|
||||
{Regular, "file7", "file7\n", 0600},
|
||||
{Dir, "dir1", "", 0740},
|
||||
{Regular, "dir1/file1-1", "file1-1\n", 01444},
|
||||
{Regular, "dir1/file1-2", "file1-2\n", 0666},
|
||||
{Dir, "dir2", "", 0700},
|
||||
{Regular, "dir2/file2-1", "file2-1\n", 0666},
|
||||
{Regular, "dir2/file2-2", "file2-2\n", 0666},
|
||||
{Dir, "dir3", "", 0700},
|
||||
{Regular, "dir3/file3-1", "file3-1\n", 0666},
|
||||
{Regular, "dir3/file3-2", "file3-2\n", 0666},
|
||||
{Dir, "dir4", "", 0700},
|
||||
{Regular, "dir4/file3-1", "file4-1\n", 0666},
|
||||
{Regular, "dir4/file3-2", "file4-2\n", 0666},
|
||||
{Symlink, "symlink1", "target1", 0666},
|
||||
{Symlink, "symlink2", "target2", 0666},
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
for _, info := range files {
|
||||
p := path.Join(root, info.path)
|
||||
if info.filetype == Dir {
|
||||
if err := os.MkdirAll(p, info.permissions); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
} else if info.filetype == Regular {
|
||||
if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
} else if info.filetype == Symlink {
|
||||
if err := os.Symlink(info.contents, p); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
if info.filetype != Symlink {
|
||||
// Set a consistent ctime, atime for all files and dirs
|
||||
if err := os.Chtimes(p, now, now); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestChangeString(t *testing.T) {
|
||||
modifiyChange := Change{"change", ChangeModify}
|
||||
toString := modifiyChange.String()
|
||||
if toString != "C change" {
|
||||
t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString)
|
||||
}
|
||||
addChange := Change{"change", ChangeAdd}
|
||||
toString = addChange.String()
|
||||
if toString != "A change" {
|
||||
t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString)
|
||||
}
|
||||
deleteChange := Change{"change", ChangeDelete}
|
||||
toString = deleteChange.String()
|
||||
if toString != "D change" {
|
||||
t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChangesWithNoChanges(t *testing.T) {
|
||||
rwLayer, err := ioutil.TempDir("", "docker-changes-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(rwLayer)
|
||||
layer, err := ioutil.TempDir("", "docker-changes-test-layer")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(layer)
|
||||
createSampleDir(t, layer)
|
||||
changes, err := Changes([]string{layer}, rwLayer)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(changes) != 0 {
|
||||
t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes))
|
||||
}
|
||||
}
|
||||
|
||||
func TestChangesWithChanges(t *testing.T) {
|
||||
// Mock the readonly layer
|
||||
layer, err := ioutil.TempDir("", "docker-changes-test-layer")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(layer)
|
||||
createSampleDir(t, layer)
|
||||
os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740)
|
||||
|
||||
// Mock the RW layer
|
||||
rwLayer, err := ioutil.TempDir("", "docker-changes-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(rwLayer)
|
||||
|
||||
// Create a folder in RW layer
|
||||
dir1 := path.Join(rwLayer, "dir1")
|
||||
os.MkdirAll(dir1, 0740)
|
||||
deletedFile := path.Join(dir1, ".wh.file1-2")
|
||||
ioutil.WriteFile(deletedFile, []byte{}, 0600)
|
||||
modifiedFile := path.Join(dir1, "file1-1")
|
||||
ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444)
|
||||
// Let's add a subfolder for a newFile
|
||||
subfolder := path.Join(dir1, "subfolder")
|
||||
os.MkdirAll(subfolder, 0740)
|
||||
newFile := path.Join(subfolder, "newFile")
|
||||
ioutil.WriteFile(newFile, []byte{}, 0740)
|
||||
|
||||
changes, err := Changes([]string{layer}, rwLayer)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedChanges := []Change{
|
||||
{"/dir1", ChangeModify},
|
||||
{"/dir1/file1-1", ChangeModify},
|
||||
{"/dir1/file1-2", ChangeDelete},
|
||||
{"/dir1/subfolder", ChangeModify},
|
||||
{"/dir1/subfolder/newFile", ChangeAdd},
|
||||
}
|
||||
checkChanges(expectedChanges, changes, t)
|
||||
}
|
||||
|
||||
// See https://github.com/docker/docker/pull/13590
|
||||
func TestChangesWithChangesGH13590(t *testing.T) {
|
||||
baseLayer, err := ioutil.TempDir("", "docker-changes-test.")
|
||||
defer os.RemoveAll(baseLayer)
|
||||
|
||||
dir3 := path.Join(baseLayer, "dir1/dir2/dir3")
|
||||
os.MkdirAll(dir3, 07400)
|
||||
|
||||
file := path.Join(dir3, "file.txt")
|
||||
ioutil.WriteFile(file, []byte("hello"), 0666)
|
||||
|
||||
layer, err := ioutil.TempDir("", "docker-changes-test2.")
|
||||
defer os.RemoveAll(layer)
|
||||
|
||||
// Test creating a new file
|
||||
if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil {
|
||||
t.Fatalf("Cmd failed: %q", err)
|
||||
}
|
||||
|
||||
os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt"))
|
||||
file = path.Join(layer, "dir1/dir2/dir3/file1.txt")
|
||||
ioutil.WriteFile(file, []byte("bye"), 0666)
|
||||
|
||||
changes, err := Changes([]string{baseLayer}, layer)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedChanges := []Change{
|
||||
{"/dir1/dir2/dir3", ChangeModify},
|
||||
{"/dir1/dir2/dir3/file1.txt", ChangeAdd},
|
||||
}
|
||||
checkChanges(expectedChanges, changes, t)
|
||||
|
||||
// Now test changing a file
|
||||
layer, err = ioutil.TempDir("", "docker-changes-test3.")
|
||||
defer os.RemoveAll(layer)
|
||||
|
||||
if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil {
|
||||
t.Fatalf("Cmd failed: %q", err)
|
||||
}
|
||||
|
||||
file = path.Join(layer, "dir1/dir2/dir3/file.txt")
|
||||
ioutil.WriteFile(file, []byte("bye"), 0666)
|
||||
|
||||
changes, err = Changes([]string{baseLayer}, layer)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedChanges = []Change{
|
||||
{"/dir1/dir2/dir3/file.txt", ChangeModify},
|
||||
}
|
||||
checkChanges(expectedChanges, changes, t)
|
||||
}
|
||||
|
||||
// Create an directory, copy it, make sure we report no changes between the two
|
||||
func TestChangesDirsEmpty(t *testing.T) {
|
||||
src, err := ioutil.TempDir("", "docker-changes-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(src)
|
||||
createSampleDir(t, src)
|
||||
dst := src + "-copy"
|
||||
if err := copyDir(src, dst); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(dst)
|
||||
changes, err := ChangesDirs(dst, src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(changes) != 0 {
|
||||
t.Fatalf("Reported changes for identical dirs: %v", changes)
|
||||
}
|
||||
os.RemoveAll(src)
|
||||
os.RemoveAll(dst)
|
||||
}
|
||||
|
||||
func mutateSampleDir(t *testing.T, root string) {
|
||||
// Remove a regular file
|
||||
if err := os.RemoveAll(path.Join(root, "file1")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Remove a directory
|
||||
if err := os.RemoveAll(path.Join(root, "dir1")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Remove a symlink
|
||||
if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Rewrite a file
|
||||
if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Replace a file
|
||||
if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Touch file
|
||||
if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Replace file with dir
|
||||
if err := os.RemoveAll(path.Join(root, "file5")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create new file
|
||||
if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create new dir
|
||||
if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create a new symlink
|
||||
if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Change a symlink
|
||||
if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Replace dir with file
|
||||
if err := os.RemoveAll(path.Join(root, "dir2")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Touch dir
|
||||
if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChangesDirsMutated(t *testing.T) {
|
||||
src, err := ioutil.TempDir("", "docker-changes-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
createSampleDir(t, src)
|
||||
dst := src + "-copy"
|
||||
if err := copyDir(src, dst); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(src)
|
||||
defer os.RemoveAll(dst)
|
||||
|
||||
mutateSampleDir(t, dst)
|
||||
|
||||
changes, err := ChangesDirs(dst, src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sort.Sort(changesByPath(changes))
|
||||
|
||||
expectedChanges := []Change{
|
||||
{"/dir1", ChangeDelete},
|
||||
{"/dir2", ChangeModify},
|
||||
{"/dirnew", ChangeAdd},
|
||||
{"/file1", ChangeDelete},
|
||||
{"/file2", ChangeModify},
|
||||
{"/file3", ChangeModify},
|
||||
{"/file4", ChangeModify},
|
||||
{"/file5", ChangeModify},
|
||||
{"/filenew", ChangeAdd},
|
||||
{"/symlink1", ChangeDelete},
|
||||
{"/symlink2", ChangeModify},
|
||||
{"/symlinknew", ChangeAdd},
|
||||
}
|
||||
|
||||
for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
|
||||
if i >= len(expectedChanges) {
|
||||
t.Fatalf("unexpected change %s\n", changes[i].String())
|
||||
}
|
||||
if i >= len(changes) {
|
||||
t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
|
||||
}
|
||||
if changes[i].Path == expectedChanges[i].Path {
|
||||
if changes[i] != expectedChanges[i] {
|
||||
t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
|
||||
}
|
||||
} else if changes[i].Path < expectedChanges[i].Path {
|
||||
t.Fatalf("unexpected change %s\n", changes[i].String())
|
||||
} else {
|
||||
t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyLayer(t *testing.T) {
|
||||
src, err := ioutil.TempDir("", "docker-changes-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
createSampleDir(t, src)
|
||||
defer os.RemoveAll(src)
|
||||
dst := src + "-copy"
|
||||
if err := copyDir(src, dst); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mutateSampleDir(t, dst)
|
||||
defer os.RemoveAll(dst)
|
||||
|
||||
changes, err := ChangesDirs(dst, src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
layer, err := ExportChanges(dst, changes)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
layerCopy, err := NewTempArchive(layer, "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := ApplyLayer(src, layerCopy); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
changes2, err := ChangesDirs(src, dst)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(changes2) != 0 {
|
||||
t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChangesSizeWithNoChanges(t *testing.T) {
|
||||
size := ChangesSize("/tmp", nil)
|
||||
if size != 0 {
|
||||
t.Fatalf("ChangesSizes with no changes should be 0, was %d", size)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) {
|
||||
changes := []Change{
|
||||
{Path: "deletedPath", Kind: ChangeDelete},
|
||||
}
|
||||
size := ChangesSize("/tmp", changes)
|
||||
if size != 0 {
|
||||
t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChangesSize(t *testing.T) {
|
||||
parentPath, err := ioutil.TempDir("", "docker-changes-test")
|
||||
defer os.RemoveAll(parentPath)
|
||||
addition := path.Join(parentPath, "addition")
|
||||
if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
modification := path.Join(parentPath, "modification")
|
||||
if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
changes := []Change{
|
||||
{Path: "addition", Kind: ChangeAdd},
|
||||
{Path: "modification", Kind: ChangeModify},
|
||||
}
|
||||
size := ChangesSize(parentPath, changes)
|
||||
if size != 6 {
|
||||
t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size)
|
||||
}
|
||||
}
|
||||
|
||||
func checkChanges(expectedChanges, changes []Change, t *testing.T) {
|
||||
sort.Sort(changesByPath(expectedChanges))
|
||||
sort.Sort(changesByPath(changes))
|
||||
for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
|
||||
if i >= len(expectedChanges) {
|
||||
t.Fatalf("unexpected change %s\n", changes[i].String())
|
||||
}
|
||||
if i >= len(changes) {
|
||||
t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
|
||||
}
|
||||
if changes[i].Path == expectedChanges[i].Path {
|
||||
if changes[i] != expectedChanges[i] {
|
||||
t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String())
|
||||
}
|
||||
} else if changes[i].Path < expectedChanges[i].Path {
|
||||
t.Fatalf("unexpected change %s\n", changes[i].String())
|
||||
} else {
|
||||
t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -3,16 +3,17 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
|
||||
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
|
||||
// Don't look at size for dirs, its not a good measure of change
|
||||
if oldStat.Mode() != newStat.Mode() ||
|
||||
oldStat.Uid() != newStat.Uid() ||
|
||||
oldStat.Gid() != newStat.Gid() ||
|
||||
oldStat.UID() != newStat.UID() ||
|
||||
oldStat.GID() != newStat.GID() ||
|
||||
oldStat.Rdev() != newStat.Rdev() ||
|
||||
// Don't look at size for dirs, its not a good measure of change
|
||||
(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
|
||||
|
@@ -25,3 +26,11 @@ func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
|
|||
func (info *FileInfo) isDir() bool {
|
||||
return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
|
||||
}
|
||||
|
||||
func getIno(fi os.FileInfo) uint64 {
|
||||
return uint64(fi.Sys().(*syscall.Stat_t).Ino)
|
||||
}
|
||||
|
||||
func hasHardlinks(fi os.FileInfo) bool {
|
||||
return fi.Sys().(*syscall.Stat_t).Nlink > 1
|
||||
}
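The unix helpers above pull the inode and link count out of syscall.Stat_t, while the Windows versions in the next hunk stub both out. A minimal, build-constrained sketch of the unix side:

//go:build !windows
// +build !windows

package main

import (
	"fmt"
	"os"
	"syscall"
)

// getIno and hasHardlinks mirror the unix helpers above: both rely on the
// underlying syscall.Stat_t carried by os.FileInfo.Sys().
func getIno(fi os.FileInfo) uint64 {
	return uint64(fi.Sys().(*syscall.Stat_t).Ino)
}

func hasHardlinks(fi os.FileInfo) bool {
	return fi.Sys().(*syscall.Stat_t).Nlink > 1
}

func main() {
	fi, err := os.Lstat("/etc/hosts") // any existing path works here
	if err != nil {
		panic(err)
	}
	fmt.Printf("inode=%d hardlinked=%v\n", getIno(fi), hasHardlinks(fi))
}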
|
||||
|
|
|
@@ -1,10 +1,12 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
|
||||
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
|
||||
|
||||
// Don't look at size for dirs, its not a good measure of change
|
||||
if oldStat.ModTime() != newStat.ModTime() ||
|
||||
|
@@ -18,3 +20,11 @@ func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
|
|||
func (info *FileInfo) isDir() bool {
|
||||
return info.parent == nil || info.stat.IsDir()
|
||||
}
|
||||
|
||||
func getIno(fi os.FileInfo) (inode uint64) {
|
||||
return
|
||||
}
|
||||
|
||||
func hasHardlinks(fi os.FileInfo) bool {
|
||||
return false
|
||||
}
|
||||
|
|
|
@@ -6,11 +6,11 @@ import (
|
|||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
// Errors used or returned by this file.
|
||||
|
@@ -29,8 +29,12 @@ var (
|
|||
// path already ends in a `.` path segment, then another is not added. If the
|
||||
// clean path already ends in a path separator, then another is not added.
|
||||
func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
|
||||
if !SpecifiesCurrentDir(cleanedPath) && SpecifiesCurrentDir(originalPath) {
|
||||
if !HasTrailingPathSeparator(cleanedPath) {
|
||||
// Ensure paths are in platform semantics
|
||||
cleanedPath = normalizePath(cleanedPath)
|
||||
originalPath = normalizePath(originalPath)
|
||||
|
||||
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
|
||||
if !hasTrailingPathSeparator(cleanedPath) {
|
||||
// Add a separator if it doesn't already end with one (a cleaned
|
||||
// path would only end in a separator if it is the root).
|
||||
cleanedPath += string(filepath.Separator)
|
||||
|
@@ -38,60 +42,60 @@ func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
|
|||
cleanedPath += "."
|
||||
}
|
||||
|
||||
if !HasTrailingPathSeparator(cleanedPath) && HasTrailingPathSeparator(originalPath) {
|
||||
if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
|
||||
cleanedPath += string(filepath.Separator)
|
||||
}
|
||||
|
||||
return cleanedPath
|
||||
}
|
||||
|
||||
// AssertsDirectory returns whether the given path is
|
||||
// assertsDirectory returns whether the given path is
|
||||
// asserted to be a directory, i.e., the path ends with
|
||||
// a trailing '/' or `/.`, assuming a path separator of `/`.
|
||||
func AssertsDirectory(path string) bool {
|
||||
return HasTrailingPathSeparator(path) || SpecifiesCurrentDir(path)
|
||||
func assertsDirectory(path string) bool {
|
||||
return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
|
||||
}
|
||||
|
||||
// HasTrailingPathSeparator returns whether the given
|
||||
// hasTrailingPathSeparator returns whether the given
|
||||
// path ends with the system's path separator character.
|
||||
func HasTrailingPathSeparator(path string) bool {
|
||||
func hasTrailingPathSeparator(path string) bool {
|
||||
return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
|
||||
}
|
||||
|
||||
// SpecifiesCurrentDir returns whether the given path specifies
|
||||
// specifiesCurrentDir returns whether the given path specifies
|
||||
// a "current directory", i.e., the last path segment is `.`.
|
||||
func SpecifiesCurrentDir(path string) bool {
|
||||
func specifiesCurrentDir(path string) bool {
|
||||
return filepath.Base(path) == "."
|
||||
}
|
||||
|
||||
// SplitPathDirEntry splits the given path between its
|
||||
// parent directory and its basename in that directory.
|
||||
func SplitPathDirEntry(localizedPath string) (dir, base string) {
|
||||
normalizedPath := filepath.ToSlash(localizedPath)
|
||||
vol := filepath.VolumeName(normalizedPath)
|
||||
normalizedPath = normalizedPath[len(vol):]
|
||||
// SplitPathDirEntry splits the given path between its directory name and its
|
||||
// basename by first cleaning the path but preserves a trailing "." if the
|
||||
// original path specified the current directory.
|
||||
func SplitPathDirEntry(path string) (dir, base string) {
|
||||
cleanedPath := filepath.Clean(normalizePath(path))
|
||||
|
||||
if normalizedPath == "/" {
|
||||
// Specifies the root path.
|
||||
return filepath.FromSlash(vol + normalizedPath), "."
|
||||
if specifiesCurrentDir(path) {
|
||||
cleanedPath += string(filepath.Separator) + "."
|
||||
}
|
||||
|
||||
trimmedPath := vol + strings.TrimRight(normalizedPath, "/")
|
||||
|
||||
dir = filepath.FromSlash(path.Dir(trimmedPath))
|
||||
base = filepath.FromSlash(path.Base(trimmedPath))
|
||||
|
||||
return dir, base
|
||||
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
|
||||
}
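The renamed helpers above rebuild SplitPathDirEntry on top of filepath.Clean, re-appending a "." element when the original path referred to the current directory. A small sketch of those helpers, with the platform normalisation step omitted:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func hasTrailingPathSeparator(path string) bool {
	return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
}

func specifiesCurrentDir(path string) bool {
	return filepath.Base(path) == "."
}

// splitPathDirEntry cleans the path first, re-appending a "." element when the
// original path pointed at the current directory, as in the hunk above.
func splitPathDirEntry(path string) (dir, base string) {
	cleanedPath := filepath.Clean(path)
	if specifiesCurrentDir(path) {
		cleanedPath += string(filepath.Separator) + "."
	}
	return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}

func main() {
	for _, p := range []string{"/foo/bar", "/foo/bar/", "/foo/bar/."} {
		dir, base := splitPathDirEntry(p)
		fmt.Printf("%-14q dir=%q base=%q trailingSep=%v\n",
			p, dir, base, hasTrailingPathSeparator(p))
	}
}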
|
||||
|
||||
// TarResource archives the resource at the given sourcePath into a Tar
|
||||
// TarResource archives the resource described by the given CopyInfo to a Tar
|
||||
// archive. A non-nil error is returned if sourcePath does not exist or is
|
||||
// asserted to be a directory but exists as another type of file.
|
||||
//
|
||||
// This function acts as a convenient wrapper around TarWithOptions, which
|
||||
// requires a directory as the source path. TarResource accepts either a
|
||||
// directory or a file path and correctly sets the Tar options.
|
||||
func TarResource(sourcePath string) (content Archive, err error) {
|
||||
func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
|
||||
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
|
||||
}
|
||||
|
||||
// TarResourceRebase is like TarResource but renames the first path element of
|
||||
// items in the resulting tar archive to match the given rebaseName if not "".
|
||||
func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
|
||||
sourcePath = normalizePath(sourcePath)
|
||||
if _, err = os.Lstat(sourcePath); err != nil {
|
||||
// Catches the case where the source does not exist or is not a
|
||||
// directory if asserted to be a directory, as this also causes an
|
||||
|
@@ -99,22 +103,6 @@ func TarResource(sourcePath string) (content Archive, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
if len(sourcePath) > 1 && HasTrailingPathSeparator(sourcePath) {
|
||||
// In the case where the source path is a symbolic link AND it ends
|
||||
// with a path separator, we will want to evaluate the symbolic link.
|
||||
trimmedPath := sourcePath[:len(sourcePath)-1]
|
||||
stat, err := os.Lstat(trimmedPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if stat.Mode()&os.ModeSymlink != 0 {
|
||||
if sourcePath, err = filepath.EvalSymlinks(trimmedPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Separate the source path between it's directory and
|
||||
// the entry in that directory which we are archiving.
|
||||
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
|
||||
|
@@ -127,39 +115,150 @@ func TarResource(sourcePath string) (content Archive, err error) {
|
|||
Compression: Uncompressed,
|
||||
IncludeFiles: filter,
|
||||
IncludeSourceDir: true,
|
||||
RebaseNames: map[string]string{
|
||||
sourceBase: rebaseName,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// CopyInfo holds basic info about the source
|
||||
// or destination path of a copy operation.
|
||||
type CopyInfo struct {
|
||||
Path string
|
||||
Exists bool
|
||||
IsDir bool
|
||||
Path string
|
||||
Exists bool
|
||||
IsDir bool
|
||||
RebaseName string
|
||||
}
|
||||
|
||||
// CopyInfoStatPath stats the given path to create a CopyInfo
|
||||
// struct representing that resource. If mustExist is true, then
|
||||
// it is an error if there is no file or directory at the given path.
|
||||
func CopyInfoStatPath(path string, mustExist bool) (CopyInfo, error) {
|
||||
pathInfo := CopyInfo{Path: path}
|
||||
// CopyInfoSourcePath stats the given path to create a CopyInfo
|
||||
// struct representing that resource for the source of an archive copy
|
||||
// operation. The given path should be an absolute local path. A source path
|
||||
// has all symlinks evaluated that appear before the last path separator ("/"
|
||||
// on Unix). As it is to be a copy source, the path must exist.
|
||||
func CopyInfoSourcePath(path string) (CopyInfo, error) {
|
||||
// Split the given path into its Directory and Base components. We will
|
||||
// evaluate symlinks in the directory component then append the base.
|
||||
path = normalizePath(path)
|
||||
dirPath, basePath := filepath.Split(path)
|
||||
|
||||
fileInfo, err := os.Lstat(path)
|
||||
|
||||
if err == nil {
|
||||
pathInfo.Exists, pathInfo.IsDir = true, fileInfo.IsDir()
|
||||
} else if os.IsNotExist(err) && !mustExist {
|
||||
err = nil
|
||||
resolvedDirPath, err := filepath.EvalSymlinks(dirPath)
|
||||
if err != nil {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
|
||||
return pathInfo, err
|
||||
// resolvedDirPath will have been cleaned (no trailing path separators) so
|
||||
// we can manually join it with the base path element.
|
||||
resolvedPath := resolvedDirPath + string(filepath.Separator) + basePath
|
||||
|
||||
var rebaseName string
|
||||
if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
|
||||
// In the case where the path had a trailing separator and a symlink
|
||||
// evaluation has changed the last path component, we will need to
|
||||
// rebase the name in the archive that is being copied to match the
|
||||
// originally requested name.
|
||||
rebaseName = filepath.Base(path)
|
||||
}
|
||||
|
||||
stat, err := os.Lstat(resolvedPath)
|
||||
if err != nil {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
|
||||
return CopyInfo{
|
||||
Path: resolvedPath,
|
||||
Exists: true,
|
||||
IsDir: stat.IsDir(),
|
||||
RebaseName: rebaseName,
|
||||
}, nil
|
||||
}
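CopyInfoSourcePath above evaluates symlinks only in the directory portion of the source path, re-attaches the untouched base element, and records a rebase name when a trailing separator plus symlink evaluation changed the final component. A condensed sketch of that resolution step, without the CopyInfo struct:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveSourcePath follows the approach in the hunk above: evaluate symlinks
// in everything before the last separator, then join the untouched base back on.
func resolveSourcePath(path string) (resolved, rebaseName string, err error) {
	dirPath, basePath := filepath.Split(path)

	resolvedDirPath, err := filepath.EvalSymlinks(dirPath)
	if err != nil {
		return "", "", err
	}
	resolved = resolvedDirPath + string(filepath.Separator) + basePath

	// If the caller wrote a trailing separator and symlink evaluation changed
	// the final element, remember the originally requested name.
	if len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) &&
		filepath.Base(path) != filepath.Base(resolved) {
		rebaseName = filepath.Base(path)
	}
	return resolved, rebaseName, nil
}

func main() {
	resolved, rebase, err := resolveSourcePath(os.TempDir() + string(filepath.Separator))
	fmt.Println(resolved, rebase, err)
}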
|
||||
|
||||
// CopyInfoDestinationPath stats the given path to create a CopyInfo
|
||||
// struct representing that resource for the destination of an archive copy
|
||||
// operation. The given path should be an absolute local path.
|
||||
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
|
||||
maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
|
||||
path = normalizePath(path)
|
||||
originalPath := path
|
||||
|
||||
stat, err := os.Lstat(path)
|
||||
|
||||
if err == nil && stat.Mode()&os.ModeSymlink == 0 {
|
||||
// The path exists and is not a symlink.
|
||||
return CopyInfo{
|
||||
Path: path,
|
||||
Exists: true,
|
||||
IsDir: stat.IsDir(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// While the path is a symlink.
|
||||
for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
|
||||
if n > maxSymlinkIter {
|
||||
// Don't follow symlinks more than this arbitrary number of times.
|
||||
return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
|
||||
}
|
||||
|
||||
// The path is a symbolic link. We need to evaluate it so that the
|
||||
// destination of the copy operation is the link target and not the
|
||||
// link itself. This is notably different than CopyInfoSourcePath which
|
||||
// only evaluates symlinks before the last appearing path separator.
|
||||
// Also note that it is okay if the last path element is a broken
|
||||
// symlink as the copy operation should create the target.
|
||||
var linkTarget string
|
||||
|
||||
linkTarget, err = os.Readlink(path)
|
||||
if err != nil {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
|
||||
if !system.IsAbs(linkTarget) {
|
||||
// Join with the parent directory.
|
||||
dstParent, _ := SplitPathDirEntry(path)
|
||||
linkTarget = filepath.Join(dstParent, linkTarget)
|
||||
}
|
||||
|
||||
path = linkTarget
|
||||
stat, err = os.Lstat(path)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// It's okay if the destination path doesn't exist. We can still
|
||||
// continue the copy operation if the parent directory exists.
|
||||
if !os.IsNotExist(err) {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
|
||||
// Ensure destination parent dir exists.
|
||||
dstParent, _ := SplitPathDirEntry(path)
|
||||
|
||||
parentDirStat, err := os.Lstat(dstParent)
|
||||
if err != nil {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
if !parentDirStat.IsDir() {
|
||||
return CopyInfo{}, ErrNotDirectory
|
||||
}
|
||||
|
||||
return CopyInfo{Path: path}, nil
|
||||
}
|
||||
|
||||
// The path exists after resolving symlinks.
|
||||
return CopyInfo{
|
||||
Path: path,
|
||||
Exists: true,
|
||||
IsDir: stat.IsDir(),
|
||||
}, nil
|
||||
}
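CopyInfoDestinationPath above follows symlinks by hand so that a broken final link is still usable as a copy destination, and it bails out after a fixed number of hops. A condensed sketch of that loop; filepath.IsAbs stands in here for the platform-specific system.IsAbs used in the diff:

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// followDestination resolves a destination path link-by-link, tolerating a
// missing final target, in the spirit of the hunk above. The hop limit is the
// same arbitrary cap used there.
func followDestination(path string) (string, error) {
	const maxSymlinkIter = 10
	stat, err := os.Lstat(path)

	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
		if n > maxSymlinkIter {
			return "", errors.New("too many symlinks in " + path)
		}
		linkTarget, rerr := os.Readlink(path)
		if rerr != nil {
			return "", rerr
		}
		if !filepath.IsAbs(linkTarget) {
			// Relative link targets are joined with the parent directory.
			linkTarget = filepath.Join(filepath.Dir(path), linkTarget)
		}
		path = linkTarget
		stat, err = os.Lstat(path)
	}

	// A missing destination is fine: the copy can still create it, as long as
	// the caller later checks that the parent directory exists.
	if err != nil && !os.IsNotExist(err) {
		return "", err
	}
	return path, nil
}

func main() {
	p, err := followDestination(os.TempDir())
	fmt.Println(p, err)
}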
|
||||
|
||||
// PrepareArchiveCopy prepares the given srcContent archive, which should
|
||||
// contain the archived resource described by srcInfo, to the destination
|
||||
// described by dstInfo. Returns the possibly modified content archive along
|
||||
// with the path to the destination directory which it should be extracted to.
|
||||
func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
|
||||
func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
|
||||
// Ensure in platform semantics
|
||||
srcInfo.Path = normalizePath(srcInfo.Path)
|
||||
dstInfo.Path = normalizePath(dstInfo.Path)
|
||||
|
||||
// Separate the destination path between its directory and base
|
||||
// components in case the source archive contents need to be rebased.
|
||||
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
|
||||
|
@@ -189,7 +288,7 @@ func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (ds
|
|||
// The source content entries will have to be renamed to have a
|
||||
// basename which matches the destination path's basename.
|
||||
return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||
case AssertsDirectory(dstInfo.Path):
|
||||
case assertsDirectory(dstInfo.Path):
|
||||
// The destination does not exist and is asserted to be created as a
|
||||
// directory, but the source content is not a directory. This is an
|
||||
// error condition since you cannot create a directory from a file
|
||||
|
@@ -208,8 +307,15 @@ func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (ds
|
|||
}
|
||||
|
||||
// rebaseArchiveEntries rewrites the given srcContent archive replacing
|
||||
// an occurance of oldBase with newBase at the beginning of entry names.
|
||||
func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Archive {
|
||||
// an occurrence of oldBase with newBase at the beginning of entry names.
|
||||
func rebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
|
||||
if oldBase == string(os.PathSeparator) {
|
||||
// If oldBase specifies the root directory, use an empty string as
|
||||
// oldBase instead so that newBase doesn't replace the path separator
|
||||
// that all paths will start with.
|
||||
oldBase = ""
|
||||
}
|
||||
|
||||
rebased, w := io.Pipe()
|
||||
|
||||
go func() {
|
||||
|
@@ -255,15 +361,19 @@ func CopyResource(srcPath, dstPath string) error {
|
|||
err error
|
||||
)
|
||||
|
||||
// Ensure in platform semantics
|
||||
srcPath = normalizePath(srcPath)
|
||||
dstPath = normalizePath(dstPath)
|
||||
|
||||
// Clean the source and destination paths.
|
||||
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
|
||||
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
|
||||
|
||||
if srcInfo, err = CopyInfoStatPath(srcPath, true); err != nil {
|
||||
if srcInfo, err = CopyInfoSourcePath(srcPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
content, err := TarResource(srcPath)
|
||||
content, err := TarResource(srcInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -274,25 +384,14 @@ func CopyResource(srcPath, dstPath string) error {
|
|||
|
||||
// CopyTo handles extracting the given content whose
|
||||
// entries should be sourced from srcInfo to dstPath.
|
||||
func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error {
|
||||
dstInfo, err := CopyInfoStatPath(dstPath, false)
|
||||
func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
|
||||
// The destination path need not exist, but CopyInfoDestinationPath will
|
||||
// ensure that at least the parent directory exists.
|
||||
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !dstInfo.Exists {
|
||||
// Ensure destination parent dir exists.
|
||||
dstParent, _ := SplitPathDirEntry(dstPath)
|
||||
|
||||
dstStat, err := os.Lstat(dstParent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !dstStat.IsDir() {
|
||||
return ErrNotDirectory
|
||||
}
|
||||
}
|
||||
|
||||
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
@@ -1,637 +0,0 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func removeAllPaths(paths ...string) {
|
||||
for _, path := range paths {
|
||||
os.RemoveAll(path)
|
||||
}
|
||||
}
|
||||
|
||||
func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) {
|
||||
var err error
|
||||
|
||||
if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func isNotDir(err error) bool {
|
||||
return strings.Contains(err.Error(), "not a directory")
|
||||
}
|
||||
|
||||
func joinTrailingSep(pathElements ...string) string {
|
||||
joined := filepath.Join(pathElements...)
|
||||
|
||||
return fmt.Sprintf("%s%c", joined, filepath.Separator)
|
||||
}
|
||||
|
||||
func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) {
|
||||
t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB)
|
||||
|
||||
fileA, err := os.Open(filenameA)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer fileA.Close()
|
||||
|
||||
fileB, err := os.Open(filenameB)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer fileB.Close()
|
||||
|
||||
hasher := sha256.New()
|
||||
|
||||
if _, err = io.Copy(hasher, fileA); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
hashA := hasher.Sum(nil)
|
||||
hasher.Reset()
|
||||
|
||||
if _, err = io.Copy(hasher, fileB); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
hashB := hasher.Sum(nil)
|
||||
|
||||
if !bytes.Equal(hashA, hashB) {
|
||||
err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) {
|
||||
t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir)
|
||||
|
||||
var changes []Change
|
||||
|
||||
if changes, err = ChangesDirs(newDir, oldDir); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if len(changes) != 0 {
|
||||
err = fmt.Errorf("expected no changes between directories, but got: %v", changes)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func logDirContents(t *testing.T, dirPath string) {
|
||||
logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
t.Errorf("stat error for path %q: %s", path, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
path = joinTrailingSep(path)
|
||||
}
|
||||
|
||||
t.Logf("\t%s", path)
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
t.Logf("logging directory contents: %q", dirPath)
|
||||
|
||||
if err := filepath.Walk(dirPath, logWalkedPaths); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) {
|
||||
t.Logf("copying from %q to %q", srcPath, dstPath)
|
||||
|
||||
return CopyResource(srcPath, dstPath)
|
||||
}
|
||||
|
||||
// Basic assumptions about SRC and DST:
|
||||
// 1. SRC must exist.
|
||||
// 2. If SRC ends with a trailing separator, it must be a directory.
|
||||
// 3. DST parent directory must exist.
|
||||
// 4. If DST exists as a file, it must not end with a trailing separator.
|
||||
|
||||
// First get these easy error cases out of the way.
|
||||
|
||||
// Test for error when SRC does not exist.
|
||||
func TestCopyErrSrcNotExists(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
content, err := TarResource(filepath.Join(tmpDirA, "file1"))
|
||||
if err == nil {
|
||||
content.Close()
|
||||
t.Fatal("expected IsNotExist error, but got nil instead")
|
||||
}
|
||||
|
||||
if !os.IsNotExist(err) {
|
||||
t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test for error when SRC ends in a trailing
|
||||
// path separator but it exists as a file.
|
||||
func TestCopyErrSrcNotDir(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
|
||||
content, err := TarResource(joinTrailingSep(tmpDirA, "file1"))
|
||||
if err == nil {
|
||||
content.Close()
|
||||
t.Fatal("expected IsNotDir error, but got nil instead")
|
||||
}
|
||||
|
||||
if !isNotDir(err) {
|
||||
t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test for error when SRC is a valid file or directory,
|
||||
// but the DST parent directory does not exist.
|
||||
func TestCopyErrDstParentNotExists(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
|
||||
srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}
|
||||
|
||||
// Try with a file source.
|
||||
content, err := TarResource(srcInfo.Path)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
defer content.Close()
|
||||
|
||||
// Copy to a file whose parent does not exist.
|
||||
if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil {
|
||||
t.Fatal("expected IsNotExist error, but got nil instead")
|
||||
}
|
||||
|
||||
if !os.IsNotExist(err) {
|
||||
t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
|
||||
}
|
||||
|
||||
// Try with a directory source.
|
||||
srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true}
|
||||
|
||||
content, err = TarResource(srcInfo.Path)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
defer content.Close()
|
||||
|
||||
// Copy to a directory whose parent does not exist.
|
||||
if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil {
|
||||
t.Fatal("expected IsNotExist error, but got nil instead")
|
||||
}
|
||||
|
||||
if !os.IsNotExist(err) {
|
||||
t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Test for error when DST ends in a trailing
|
||||
// path separator but exists as a file.
|
||||
func TestCopyErrDstNotDir(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A and B with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
createSampleDir(t, tmpDirB)
|
||||
|
||||
// Try with a file source.
|
||||
srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}
|
||||
|
||||
content, err := TarResource(srcInfo.Path)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
defer content.Close()
|
||||
|
||||
if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil {
|
||||
t.Fatal("expected IsNotDir error, but got nil instead")
|
||||
}
|
||||
|
||||
if !isNotDir(err) {
|
||||
t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
|
||||
}
|
||||
|
||||
// Try with a directory source.
|
||||
srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true}
|
||||
|
||||
content, err = TarResource(srcInfo.Path)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
defer content.Close()
|
||||
|
||||
if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil {
|
||||
t.Fatal("expected IsNotDir error, but got nil instead")
|
||||
}
|
||||
|
||||
if !isNotDir(err) {
|
||||
t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Possibilities are reduced to the remaining 10 cases:
|
||||
//
|
||||
// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action
|
||||
// ===================================================================================================
|
||||
// A | no | - | no | - | no | create file
|
||||
// B | no | - | no | - | yes | error
|
||||
// C | no | - | yes | no | - | overwrite file
|
||||
// D | no | - | yes | yes | - | create file in dst dir
|
||||
// E | yes | no | no | - | - | create dir, copy contents
|
||||
// F | yes | no | yes | no | - | error
|
||||
// G | yes | no | yes | yes | - | copy dir and contents
|
||||
// H | yes | yes | no | - | - | create dir, copy contents
|
||||
// I | yes | yes | yes | no | - | error
|
||||
// J | yes | yes | yes | yes | - | copy dir contents
|
||||
//
|
||||
|
||||
// A. SRC specifies a file and DST (no trailing path separator) doesn't
|
||||
// exist. This should create a file with the name DST and copy the
|
||||
// contents of the source file into it.
|
||||
func TestCopyCaseA(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
|
||||
srcPath := filepath.Join(tmpDirA, "file1")
|
||||
dstPath := filepath.Join(tmpDirB, "itWorks.txt")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcPath, dstPath); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// B. SRC specifies a file and DST (with trailing path separator) doesn't
|
||||
// exist. This should cause an error because the copy operation cannot
|
||||
// create a directory when copying a single file.
|
||||
func TestCopyCaseB(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
|
||||
srcPath := filepath.Join(tmpDirA, "file1")
|
||||
dstDir := joinTrailingSep(tmpDirB, "testDir")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcPath, dstDir); err == nil {
|
||||
t.Fatal("expected ErrDirNotExists error, but got nil instead")
|
||||
}
|
||||
|
||||
if err != ErrDirNotExists {
|
||||
t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// C. SRC specifies a file and DST exists as a file. This should overwrite
|
||||
// the file at DST with the contents of the source file.
|
||||
func TestCopyCaseC(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A and B with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
createSampleDir(t, tmpDirB)
|
||||
|
||||
srcPath := filepath.Join(tmpDirA, "file1")
|
||||
dstPath := filepath.Join(tmpDirB, "file2")
|
||||
|
||||
var err error
|
||||
|
||||
// Ensure they start out different.
|
||||
if err = fileContentsEqual(t, srcPath, dstPath); err == nil {
|
||||
t.Fatal("expected different file contents")
|
||||
}
|
||||
|
||||
if err = testCopyHelper(t, srcPath, dstPath); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// D. SRC specifies a file and DST exists as a directory. This should place
|
||||
// a copy of the source file inside it using the basename from SRC. Ensure
|
||||
// this works whether DST has a trailing path separator or not.
|
||||
func TestCopyCaseD(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A and B with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
createSampleDir(t, tmpDirB)
|
||||
|
||||
srcPath := filepath.Join(tmpDirA, "file1")
|
||||
dstDir := filepath.Join(tmpDirB, "dir1")
|
||||
dstPath := filepath.Join(dstDir, "file1")
|
||||
|
||||
var err error
|
||||
|
||||
// Ensure that dstPath doesn't exist.
|
||||
if _, err = os.Stat(dstPath); !os.IsNotExist(err) {
|
||||
t.Fatalf("did not expect dstPath %q to exist", dstPath)
|
||||
}
|
||||
|
||||
if err = testCopyHelper(t, srcPath, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now try again but using a trailing path separator for dstDir.
|
||||
|
||||
if err = os.RemoveAll(dstDir); err != nil {
|
||||
t.Fatalf("unable to remove dstDir: %s", err)
|
||||
}
|
||||
|
||||
if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
|
||||
t.Fatalf("unable to make dstDir: %s", err)
|
||||
}
|
||||
|
||||
dstDir = joinTrailingSep(tmpDirB, "dir1")
|
||||
|
||||
if err = testCopyHelper(t, srcPath, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// E. SRC specifies a directory and DST does not exist. This should create a
|
||||
// directory at DST and copy the contents of the SRC directory into the DST
|
||||
// directory. Ensure this works whether DST has a trailing path separator or
|
||||
// not.
|
||||
func TestCopyCaseE(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
|
||||
srcDir := filepath.Join(tmpDirA, "dir1")
|
||||
dstDir := filepath.Join(tmpDirB, "testDir")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
|
||||
t.Log("dir contents not equal")
|
||||
logDirContents(t, tmpDirA)
|
||||
logDirContents(t, tmpDirB)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now try again but using a trailing path separator for dstDir.
|
||||
|
||||
if err = os.RemoveAll(dstDir); err != nil {
|
||||
t.Fatalf("unable to remove dstDir: %s", err)
|
||||
}
|
||||
|
||||
dstDir = joinTrailingSep(tmpDirB, "testDir")
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// F. SRC specifies a directory and DST exists as a file. This should cause an
|
||||
// error as it is not possible to overwrite a file with a directory.
|
||||
func TestCopyCaseF(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A and B with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
createSampleDir(t, tmpDirB)
|
||||
|
||||
srcDir := filepath.Join(tmpDirA, "dir1")
|
||||
dstFile := filepath.Join(tmpDirB, "file1")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstFile); err == nil {
|
||||
t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
|
||||
}
|
||||
|
||||
if err != ErrCannotCopyDir {
|
||||
t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// G. SRC specifies a directory and DST exists as a directory. This should copy
|
||||
// the SRC directory and all its contents to the DST directory. Ensure this
|
||||
// works whether DST has a trailing path separator or not.
|
||||
func TestCopyCaseG(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A and B with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
createSampleDir(t, tmpDirB)
|
||||
|
||||
srcDir := filepath.Join(tmpDirA, "dir1")
|
||||
dstDir := filepath.Join(tmpDirB, "dir2")
|
||||
resultDir := filepath.Join(dstDir, "dir1")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, resultDir, srcDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now try again but using a trailing path separator for dstDir.
|
||||
|
||||
if err = os.RemoveAll(dstDir); err != nil {
|
||||
t.Fatalf("unable to remove dstDir: %s", err)
|
||||
}
|
||||
|
||||
if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
|
||||
t.Fatalf("unable to make dstDir: %s", err)
|
||||
}
|
||||
|
||||
dstDir = joinTrailingSep(tmpDirB, "dir2")
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, resultDir, srcDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// H. SRC specifies a directory's contents only and DST does not exist. This
|
||||
// should create a directory at DST and copy the contents of the SRC
|
||||
// directory (but not the directory itself) into the DST directory. Ensure
|
||||
// this works whether DST has a trailing path separator or not.
|
||||
func TestCopyCaseH(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
|
||||
srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
|
||||
dstDir := filepath.Join(tmpDirB, "testDir")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
|
||||
t.Log("dir contents not equal")
|
||||
logDirContents(t, tmpDirA)
|
||||
logDirContents(t, tmpDirB)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now try again but using a trailing path separator for dstDir.
|
||||
|
||||
if err = os.RemoveAll(dstDir); err != nil {
|
||||
t.Fatalf("unable to remove dstDir: %s", err)
|
||||
}
|
||||
|
||||
dstDir = joinTrailingSep(tmpDirB, "testDir")
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
|
||||
t.Log("dir contents not equal")
|
||||
logDirContents(t, tmpDirA)
|
||||
logDirContents(t, tmpDirB)
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// I. SRC specifies a directory's contents only and DST exists as a file. This
|
||||
// should cause an error as it is not possible to overwrite a file with a
|
||||
// directory.
|
||||
func TestCopyCaseI(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A and B with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
createSampleDir(t, tmpDirB)
|
||||
|
||||
srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
|
||||
dstFile := filepath.Join(tmpDirB, "file1")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstFile); err == nil {
|
||||
t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
|
||||
}
|
||||
|
||||
if err != ErrCannotCopyDir {
|
||||
t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
|
||||
}
|
||||
}
|
||||
|
||||
// J. SRC specifies a directory's contents only and DST exists as a directory.
|
||||
// This should copy the contents of the SRC directory (but not the directory
|
||||
// itself) into the DST directory. Ensure this works whether DST has a
|
||||
// trailing path separator or not.
|
||||
func TestCopyCaseJ(t *testing.T) {
|
||||
tmpDirA, tmpDirB := getTestTempDirs(t)
|
||||
defer removeAllPaths(tmpDirA, tmpDirB)
|
||||
|
||||
// Load A and B with some sample files and directories.
|
||||
createSampleDir(t, tmpDirA)
|
||||
createSampleDir(t, tmpDirB)
|
||||
|
||||
srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
|
||||
dstDir := filepath.Join(tmpDirB, "dir5")
|
||||
|
||||
var err error
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Now try again but using a trailing path separator for dstDir.
|
||||
|
||||
if err = os.RemoveAll(dstDir); err != nil {
|
||||
t.Fatalf("unable to remove dstDir: %s", err)
|
||||
}
|
||||
|
||||
if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
|
||||
t.Fatalf("unable to make dstDir: %s", err)
|
||||
}
|
||||
|
||||
dstDir = joinTrailingSep(tmpDirB, "dir5")
|
||||
|
||||
if err = testCopyHelper(t, srcDir, dstDir); err != nil {
|
||||
t.Fatalf("unexpected error %T: %s", err, err)
|
||||
}
|
||||
|
||||
if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
|
@@ -9,23 +9,40 @@ import (
|
|||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
|
||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
|
||||
// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
|
||||
// compressed or uncompressed.
|
||||
// Returns the size in bytes of the contents of the layer.
|
||||
func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
|
||||
tr := tar.NewReader(layer)
|
||||
trBuf := pools.BufioReader32KPool.Get(tr)
|
||||
defer pools.BufioReader32KPool.Put(trBuf)
|
||||
|
||||
var dirs []*tar.Header
|
||||
|
||||
if options == nil {
|
||||
options = &TarOptions{}
|
||||
}
|
||||
if options.ExcludePatterns == nil {
|
||||
options.ExcludePatterns = []string{}
|
||||
}
|
||||
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
aufsTempdir := ""
|
||||
aufsHardlinks := make(map[string]*tar.Header)
|
||||
|
||||
if options == nil {
|
||||
options = &TarOptions{}
|
||||
}
|
||||
// Iterate through the files in the archive.
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
|
@@ -55,7 +72,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
|
|||
// TODO Windows. Once the registry is aware of what images are Windows-
|
||||
// specific or Linux-specific, this warning should be changed to an error
|
||||
// to cater for the situation where someone does manage to upload a Linux
|
||||
// image but have it tagged as Windows inadvertantly.
|
||||
// image but have it tagged as Windows inadvertently.
|
||||
if runtime.GOOS == "windows" {
|
||||
if strings.Contains(hdr.Name, ":") {
|
||||
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
|
||||
|
@@ -80,11 +97,11 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
|
|||
}
|
||||
|
||||
// Skip AUFS metadata dirs
|
||||
if strings.HasPrefix(hdr.Name, ".wh..wh.") {
|
||||
if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
|
||||
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
|
||||
// We don't want this directory, but we need the files in them so that
|
||||
// such hardlinks can be resolved.
|
||||
if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg {
|
||||
if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
|
||||
basename := filepath.Base(hdr.Name)
|
||||
aufsHardlinks[basename] = hdr
|
||||
if aufsTempdir == "" {
|
||||
|
@@ -97,7 +114,10 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
|
|||
return 0, err
|
||||
}
|
||||
}
|
||||
continue
|
||||
|
||||
if hdr.Name != WhiteoutOpaqueDir {
|
||||
continue
|
||||
}
|
||||
}
|
||||
path := filepath.Join(dest, hdr.Name)
|
||||
rel, err := filepath.Rel(dest, path)
|
||||
|
@@ -111,11 +131,25 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
|
|||
}
|
||||
base := filepath.Base(path)
|
||||
|
||||
if strings.HasPrefix(base, ".wh.") {
|
||||
originalBase := base[len(".wh."):]
|
||||
originalPath := filepath.Join(filepath.Dir(path), originalBase)
|
||||
if err := os.RemoveAll(originalPath); err != nil {
|
||||
return 0, err
|
||||
if strings.HasPrefix(base, WhiteoutPrefix) {
|
||||
dir := filepath.Dir(path)
|
||||
if base == WhiteoutOpaqueDir {
|
||||
fi, err := os.Lstat(dir)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return 0, err
|
||||
}
|
||||
if err := os.RemoveAll(dir); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err := os.Mkdir(dir, fi.Mode()&os.ModePerm); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
} else {
|
||||
originalBase := base[len(WhiteoutPrefix):]
|
||||
originalPath := filepath.Join(dir, originalBase)
|
||||
if err := os.RemoveAll(originalPath); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If path exists we almost always just want to remove and replace it.
|
||||
|
@@ -136,7 +170,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
|
|||
|
||||
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
|
||||
// we manually retarget these into the temporary files we extracted them into
|
||||
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") {
|
||||
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
|
||||
linkBasename := filepath.Base(hdr.Linkname)
|
||||
srcHdr = aufsHardlinks[linkBasename]
|
||||
if srcHdr == nil {
|
||||
|
@@ -150,6 +184,27 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
|
|||
srcData = tmpFile
|
||||
}
|
||||
|
||||
// if the options contain a uid & gid maps, convert header uid/gid
|
||||
// entries using the maps such that lchown sets the proper mapped
|
||||
// uid/gid after writing the file. We only perform this mapping if
|
||||
// the file isn't already owned by the remapped root UID or GID, as
|
||||
// that specific uid/gid has no mapping from container -> host, and
|
||||
// those files already have the proper ownership for inside the
|
||||
// container.
|
||||
if srcHdr.Uid != remappedRootUID {
|
||||
xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
srcHdr.Uid = xUID
|
||||
}
|
||||
if srcHdr.Gid != remappedRootGID {
|
||||
xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
srcHdr.Gid = xGID
|
||||
}
|
||||
if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@@ -164,8 +219,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
|
|||
|
||||
for _, hdr := range dirs {
|
||||
path := filepath.Join(dest, hdr.Name)
|
||||
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
|
||||
if err := syscall.UtimesNano(path, ts); err != nil {
|
||||
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
@@ -177,20 +231,20 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
|
|||
// and applies it to the directory `dest`. The stream `layer` can be
|
||||
// compressed or uncompressed.
|
||||
// Returns the size in bytes of the contents of the layer.
|
||||
func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
|
||||
return applyLayerHandler(dest, layer, true)
|
||||
func ApplyLayer(dest string, layer Reader) (int64, error) {
|
||||
return applyLayerHandler(dest, layer, &TarOptions{}, true)
|
||||
}
|
||||
|
||||
// ApplyUncompressedLayer parses a diff in the standard layer format from
|
||||
// `layer`, and applies it to the directory `dest`. The stream `layer`
|
||||
// can only be uncompressed.
|
||||
// Returns the size in bytes of the contents of the layer.
|
||||
func ApplyUncompressedLayer(dest string, layer ArchiveReader) (int64, error) {
|
||||
return applyLayerHandler(dest, layer, false)
|
||||
func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
|
||||
return applyLayerHandler(dest, layer, options, false)
|
||||
}
|
||||
|
||||
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
|
||||
func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64, error) {
|
||||
func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
|
||||
dest = filepath.Clean(dest)
|
||||
|
||||
// We need to be able to set any perms
|
||||
|
@@ -206,5 +260,5 @@ func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64
|
|||
return 0, err
|
||||
}
|
||||
}
|
||||
return UnpackLayer(dest, layer)
|
||||
return UnpackLayer(dest, layer, options)
|
||||
}
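
// applyLayerExample is a hedged usage sketch, not part of the vendored diff;
// the names root and layerStream are illustrative. It relies only on the
// signatures shown above: ApplyLayer keeps its two-argument form and passes an
// empty TarOptions internally, while ApplyUncompressedLayer takes an explicit
// *TarOptions for callers that need UID/GID remapping.
func applyLayerExample(root string, layerStream Reader) error {
    // The stream may be compressed or uncompressed; ApplyLayer handles decompression.
    size, err := ApplyLayer(root, layerStream)
    if err != nil {
        return err
    }
    _ = size // number of bytes of layer content unpacked under root
    return nil
}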
|
||||
|
|
|
@@ -1,190 +0,0 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestApplyLayerInvalidFilenames(t *testing.T) {
|
||||
for i, headers := range [][]*tar.Header{
|
||||
{
|
||||
{
|
||||
Name: "../victim/dotdot",
|
||||
Typeflag: tar.TypeReg,
|
||||
Mode: 0644,
|
||||
},
|
||||
},
|
||||
{
|
||||
{
|
||||
// Note the leading slash
|
||||
Name: "/../victim/slash-dotdot",
|
||||
Typeflag: tar.TypeReg,
|
||||
Mode: 0644,
|
||||
},
|
||||
},
|
||||
} {
|
||||
if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil {
|
||||
t.Fatalf("i=%d. %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyLayerInvalidHardlink(t *testing.T) {
|
||||
for i, headers := range [][]*tar.Header{
|
||||
{ // try reading victim/hello (../)
|
||||
{
|
||||
Name: "dotdot",
|
||||
Typeflag: tar.TypeLink,
|
||||
Linkname: "../victim/hello",
|
||||
Mode: 0644,
|
||||
},
|
||||
},
|
||||
{ // try reading victim/hello (/../)
|
||||
{
|
||||
Name: "slash-dotdot",
|
||||
Typeflag: tar.TypeLink,
|
||||
// Note the leading slash
|
||||
Linkname: "/../victim/hello",
|
||||
Mode: 0644,
|
||||
},
|
||||
},
|
||||
{ // try writing victim/file
|
||||
{
|
||||
Name: "loophole-victim",
|
||||
Typeflag: tar.TypeLink,
|
||||
Linkname: "../victim",
|
||||
Mode: 0755,
|
||||
},
|
||||
{
|
||||
Name: "loophole-victim/file",
|
||||
Typeflag: tar.TypeReg,
|
||||
Mode: 0644,
|
||||
},
|
||||
},
|
||||
{ // try reading victim/hello (hardlink, symlink)
|
||||
{
|
||||
Name: "loophole-victim",
|
||||
Typeflag: tar.TypeLink,
|
||||
Linkname: "../victim",
|
||||
Mode: 0755,
|
||||
},
|
||||
{
|
||||
Name: "symlink",
|
||||
Typeflag: tar.TypeSymlink,
|
||||
Linkname: "loophole-victim/hello",
|
||||
Mode: 0644,
|
||||
},
|
||||
},
|
||||
{ // Try reading victim/hello (hardlink, hardlink)
|
||||
{
|
||||
Name: "loophole-victim",
|
||||
Typeflag: tar.TypeLink,
|
||||
Linkname: "../victim",
|
||||
Mode: 0755,
|
||||
},
|
||||
{
|
||||
Name: "hardlink",
|
||||
Typeflag: tar.TypeLink,
|
||||
Linkname: "loophole-victim/hello",
|
||||
Mode: 0644,
|
||||
},
|
||||
},
|
||||
{ // Try removing victim directory (hardlink)
|
||||
{
|
||||
Name: "loophole-victim",
|
||||
Typeflag: tar.TypeLink,
|
||||
Linkname: "../victim",
|
||||
Mode: 0755,
|
||||
},
|
||||
{
|
||||
Name: "loophole-victim",
|
||||
Typeflag: tar.TypeReg,
|
||||
Mode: 0644,
|
||||
},
|
||||
},
|
||||
} {
|
||||
if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil {
|
||||
t.Fatalf("i=%d. %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyLayerInvalidSymlink(t *testing.T) {
|
||||
for i, headers := range [][]*tar.Header{
|
||||
{ // try reading victim/hello (../)
|
||||
{
|
||||
Name: "dotdot",
|
||||
Typeflag: tar.TypeSymlink,
|
||||
Linkname: "../victim/hello",
|
||||
Mode: 0644,
|
||||
},
|
||||
},
|
||||
{ // try reading victim/hello (/../)
|
||||
{
|
||||
Name: "slash-dotdot",
|
||||
Typeflag: tar.TypeSymlink,
|
||||
// Note the leading slash
|
||||
Linkname: "/../victim/hello",
|
||||
Mode: 0644,
|
||||
},
|
||||
},
|
||||
{ // try writing victim/file
|
||||
{
|
||||
Name: "loophole-victim",
|
||||
Typeflag: tar.TypeSymlink,
|
||||
Linkname: "../victim",
|
||||
Mode: 0755,
|
||||
},
|
||||
{
|
||||
Name: "loophole-victim/file",
|
||||
Typeflag: tar.TypeReg,
|
||||
Mode: 0644,
|
||||
},
|
||||
},
|
||||
{ // try reading victim/hello (symlink, symlink)
|
||||
{
|
||||
Name: "loophole-victim",
|
||||
Typeflag: tar.TypeSymlink,
|
||||
Linkname: "../victim",
|
||||
Mode: 0755,
|
||||
},
|
||||
{
|
||||
Name: "symlink",
|
||||
Typeflag: tar.TypeSymlink,
|
||||
Linkname: "loophole-victim/hello",
|
||||
Mode: 0644,
|
||||
},
|
||||
},
|
||||
{ // try reading victim/hello (symlink, hardlink)
|
||||
{
|
||||
Name: "loophole-victim",
|
||||
Typeflag: tar.TypeSymlink,
|
||||
Linkname: "../victim",
|
||||
Mode: 0755,
|
||||
},
|
||||
{
|
||||
Name: "hardlink",
|
||||
Typeflag: tar.TypeLink,
|
||||
Linkname: "loophole-victim/hello",
|
||||
Mode: 0644,
|
||||
},
|
||||
},
|
||||
{ // try removing victim directory (symlink)
|
||||
{
|
||||
Name: "loophole-victim",
|
||||
Typeflag: tar.TypeSymlink,
|
||||
Linkname: "../victim",
|
||||
Mode: 0755,
|
||||
},
|
||||
{
|
||||
Name: "loophole-victim",
|
||||
Typeflag: tar.TypeReg,
|
||||
Mode: 0644,
|
||||
},
|
||||
},
|
||||
} {
|
||||
if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil {
|
||||
t.Fatalf("i=%d. %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,166 +0,0 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
var testUntarFns = map[string]func(string, io.Reader) error{
|
||||
"untar": func(dest string, r io.Reader) error {
|
||||
return Untar(r, dest, nil)
|
||||
},
|
||||
"applylayer": func(dest string, r io.Reader) error {
|
||||
_, err := ApplyLayer(dest, ArchiveReader(r))
|
||||
return err
|
||||
},
|
||||
}
|
||||
|
||||
// testBreakout is a helper function that, within the provided `tmpdir` directory,
|
||||
// creates a `victim` folder with a generated `hello` file in it.
|
||||
// `untar` extracts to a directory named `dest`, the tar file created from `headers`.
|
||||
//
|
||||
// Here are the tested scenarios:
|
||||
// - removed `victim` folder (write)
|
||||
// - removed files from `victim` folder (write)
|
||||
// - new files in `victim` folder (write)
|
||||
// - modified files in `victim` folder (write)
|
||||
// - file in `dest` with same content as `victim/hello` (read)
|
||||
//
|
||||
// When using testBreakout make sure you cover one of the scenarios listed above.
|
||||
func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error {
|
||||
tmpdir, err := ioutil.TempDir("", tmpdir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
dest := filepath.Join(tmpdir, "dest")
|
||||
if err := os.Mkdir(dest, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
victim := filepath.Join(tmpdir, "victim")
|
||||
if err := os.Mkdir(victim, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
hello := filepath.Join(victim, "hello")
|
||||
helloData, err := time.Now().MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ioutil.WriteFile(hello, helloData, 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
helloStat, err := os.Stat(hello)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reader, writer := io.Pipe()
|
||||
go func() {
|
||||
t := tar.NewWriter(writer)
|
||||
for _, hdr := range headers {
|
||||
t.WriteHeader(hdr)
|
||||
}
|
||||
t.Close()
|
||||
}()
|
||||
|
||||
untar := testUntarFns[untarFn]
|
||||
if untar == nil {
|
||||
return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn)
|
||||
}
|
||||
if err := untar(dest, reader); err != nil {
|
||||
if _, ok := err.(breakoutError); !ok {
|
||||
// If untar returns an error unrelated to an archive breakout,
|
||||
// then consider this an unexpected error and abort.
|
||||
return err
|
||||
}
|
||||
// Here, untar detected the breakout.
|
||||
// Let's move on verifying that indeed there was no breakout.
|
||||
fmt.Printf("breakoutError: %v\n", err)
|
||||
}
|
||||
|
||||
// Check victim folder
|
||||
f, err := os.Open(victim)
|
||||
if err != nil {
|
||||
// codepath taken if victim folder was removed
|
||||
return fmt.Errorf("archive breakout: error reading %q: %v", victim, err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// Check contents of victim folder
|
||||
//
|
||||
// We are only interested in getting 2 files from the victim folder, because if all is well
|
||||
// we expect only one result, the `hello` file. If there is a second result, it cannot
|
||||
// hold the same name `hello` and we assume that a new file got created in the victim folder.
|
||||
// That is enough to detect an archive breakout.
|
||||
names, err := f.Readdirnames(2)
|
||||
if err != nil {
|
||||
// codepath taken if victim is not a folder
|
||||
return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err)
|
||||
}
|
||||
for _, name := range names {
|
||||
if name != "hello" {
|
||||
// codepath taken if new file was created in victim folder
|
||||
return fmt.Errorf("archive breakout: new file %q", name)
|
||||
}
|
||||
}
|
||||
|
||||
// Check victim/hello
|
||||
f, err = os.Open(hello)
|
||||
if err != nil {
|
||||
// codepath taken if read permissions were removed
|
||||
return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err)
|
||||
}
|
||||
defer f.Close()
|
||||
b, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if helloStat.IsDir() != fi.IsDir() ||
|
||||
// TODO: cannot check for fi.ModTime() change
|
||||
helloStat.Mode() != fi.Mode() ||
|
||||
helloStat.Size() != fi.Size() ||
|
||||
!bytes.Equal(helloData, b) {
|
||||
// codepath taken if hello has been modified
|
||||
return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi)
|
||||
}
|
||||
|
||||
// Check that nothing in dest/ has the same content as victim/hello.
|
||||
// Since victim/hello was generated with time.Now(), it is safe to assume
|
||||
// that any file whose content matches exactly victim/hello, managed somehow
|
||||
// to access victim/hello.
|
||||
return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error {
|
||||
if info.IsDir() {
|
||||
if err != nil {
|
||||
// skip directory if error
|
||||
return filepath.SkipDir
|
||||
}
|
||||
// enter directory
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
// skip file if error
|
||||
return nil
|
||||
}
|
||||
b, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
// Houston, we have a problem. Aborting (space)walk.
|
||||
return err
|
||||
}
|
||||
if bytes.Equal(helloData, b) {
|
||||
return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
23 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go generated vendored Normal file

@@ -0,0 +1,23 @@
package archive

// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.

// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."

// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix

// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"

// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
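// describeWhiteout is a hedged sketch, not part of the vendored file: it shows
// how the constants above are interpreted when a layer is read back, mirroring
// the UnpackLayer changes earlier in this diff. It assumes "path/filepath" and
// "strings" are imported; name is a tar entry name as stored in the layer.
func describeWhiteout(name string) string {
    base := filepath.Base(name)
    switch {
    case base == WhiteoutOpaqueDir:
        return "make " + filepath.Dir(name) + " opaque: hide lower-layer contents"
    case strings.HasPrefix(name, WhiteoutMetaPrefix):
        return "AUFS metadata entry (e.g. under " + WhiteoutLinkDir + "), not a user-visible change"
    case strings.HasPrefix(base, WhiteoutPrefix):
        return "delete " + filepath.Join(filepath.Dir(name), strings.TrimPrefix(base, WhiteoutPrefix)) + " from the lower layer"
    default:
        return "regular entry"
    }
}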
@@ -1,98 +0,0 @@
|
|||
package archive
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"io"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGenerateEmptyFile(t *testing.T) {
|
||||
archive, err := Generate("emptyFile")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if archive == nil {
|
||||
t.Fatal("The generated archive should not be nil.")
|
||||
}
|
||||
|
||||
expectedFiles := [][]string{
|
||||
{"emptyFile", ""},
|
||||
}
|
||||
|
||||
tr := tar.NewReader(archive)
|
||||
actualFiles := make([][]string, 0, 10)
|
||||
i := 0
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(tr)
|
||||
content := buf.String()
|
||||
actualFiles = append(actualFiles, []string{hdr.Name, content})
|
||||
i++
|
||||
}
|
||||
if len(actualFiles) != len(expectedFiles) {
|
||||
t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles))
|
||||
}
|
||||
for i := 0; i < len(expectedFiles); i++ {
|
||||
actual := actualFiles[i]
|
||||
expected := expectedFiles[i]
|
||||
if actual[0] != expected[0] {
|
||||
t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
|
||||
}
|
||||
if actual[1] != expected[1] {
|
||||
t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateWithContent(t *testing.T) {
|
||||
archive, err := Generate("file", "content")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if archive == nil {
|
||||
t.Fatal("The generated archive should not be nil.")
|
||||
}
|
||||
|
||||
expectedFiles := [][]string{
|
||||
{"file", "content"},
|
||||
}
|
||||
|
||||
tr := tar.NewReader(archive)
|
||||
actualFiles := make([][]string, 0, 10)
|
||||
i := 0
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(tr)
|
||||
content := buf.String()
|
||||
actualFiles = append(actualFiles, []string{hdr.Name, content})
|
||||
i++
|
||||
}
|
||||
if len(actualFiles) != len(expectedFiles) {
|
||||
t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles))
|
||||
}
|
||||
for i := 0; i < len(expectedFiles); i++ {
|
||||
actual := actualFiles[i]
|
||||
expected := expectedFiles[i]
|
||||
if actual[0] != expected[0] {
|
||||
t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
|
||||
}
|
||||
if actual[1] != expected[1] {
|
||||
t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -4,7 +4,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
@@ -143,17 +142,6 @@ func CopyFile(src, dst string) (int64, error) {
|
|||
return io.Copy(df, sf)
|
||||
}
|
||||
|
||||
// GetTotalUsedFds Returns the number of used File Descriptors by
|
||||
// reading it via /proc filesystem.
|
||||
func GetTotalUsedFds() int {
|
||||
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
|
||||
logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
|
||||
} else {
|
||||
return len(fds)
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// ReadSymlinkedDirectory returns the target directory of a symlink.
|
||||
// The target of the symbolic link may not be a file.
|
||||
func ReadSymlinkedDirectory(path string) (string, error) {
|
||||
|
|
|
@@ -1,402 +0,0 @@
|
|||
package fileutils
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// CopyFile with invalid src
|
||||
func TestCopyFileWithInvalidSrc(t *testing.T) {
|
||||
tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
|
||||
defer os.RemoveAll(tempFolder)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest"))
|
||||
if err == nil {
|
||||
t.Fatal("Should have fail to copy an invalid src file")
|
||||
}
|
||||
if bytes != 0 {
|
||||
t.Fatal("Should have written 0 bytes")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// CopyFile with invalid dest
|
||||
func TestCopyFileWithInvalidDest(t *testing.T) {
|
||||
tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
|
||||
defer os.RemoveAll(tempFolder)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
src := path.Join(tempFolder, "file")
|
||||
err = ioutil.WriteFile(src, []byte("content"), 0740)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path"))
|
||||
if err == nil {
|
||||
t.Fatal("Should have fail to copy an invalid src file")
|
||||
}
|
||||
if bytes != 0 {
|
||||
t.Fatal("Should have written 0 bytes")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// CopyFile with same src and dest
|
||||
func TestCopyFileWithSameSrcAndDest(t *testing.T) {
|
||||
tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
|
||||
defer os.RemoveAll(tempFolder)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
file := path.Join(tempFolder, "file")
|
||||
err = ioutil.WriteFile(file, []byte("content"), 0740)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
bytes, err := CopyFile(file, file)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if bytes != 0 {
|
||||
t.Fatal("Should have written 0 bytes as it is the same file.")
|
||||
}
|
||||
}
|
||||
|
||||
// CopyFile with same src and dest but path is different and not clean
|
||||
func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) {
|
||||
tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
|
||||
defer os.RemoveAll(tempFolder)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testFolder := path.Join(tempFolder, "test")
|
||||
err = os.MkdirAll(testFolder, 0740)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
file := path.Join(testFolder, "file")
|
||||
sameFile := testFolder + "/../test/file"
|
||||
err = ioutil.WriteFile(file, []byte("content"), 0740)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
bytes, err := CopyFile(file, sameFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if bytes != 0 {
|
||||
t.Fatal("Should have written 0 bytes as it is the same file.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyFile(t *testing.T) {
|
||||
tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
|
||||
defer os.RemoveAll(tempFolder)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
src := path.Join(tempFolder, "src")
|
||||
dest := path.Join(tempFolder, "dest")
|
||||
ioutil.WriteFile(src, []byte("content"), 0777)
|
||||
ioutil.WriteFile(dest, []byte("destContent"), 0777)
|
||||
bytes, err := CopyFile(src, dest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if bytes != 7 {
|
||||
t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes)
|
||||
}
|
||||
actual, err := ioutil.ReadFile(dest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(actual) != "content" {
|
||||
t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content")
|
||||
}
|
||||
}
|
||||
|
||||
// Reading a symlink to a directory must return the directory
|
||||
func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) {
|
||||
var err error
|
||||
if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil {
|
||||
t.Errorf("failed to create directory: %s", err)
|
||||
}
|
||||
|
||||
if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil {
|
||||
t.Errorf("failed to create symlink: %s", err)
|
||||
}
|
||||
|
||||
var path string
|
||||
if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil {
|
||||
t.Fatalf("failed to read symlink to directory: %s", err)
|
||||
}
|
||||
|
||||
if path != "/tmp/testReadSymlinkToExistingDirectory" {
|
||||
t.Fatalf("symlink returned unexpected directory: %s", path)
|
||||
}
|
||||
|
||||
if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil {
|
||||
t.Errorf("failed to remove temporary directory: %s", err)
|
||||
}
|
||||
|
||||
if err = os.Remove("/tmp/dirLinkTest"); err != nil {
|
||||
t.Errorf("failed to remove symlink: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Reading a non-existing symlink must fail
|
||||
func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) {
|
||||
var path string
|
||||
var err error
|
||||
if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil {
|
||||
t.Fatalf("error expected for non-existing symlink")
|
||||
}
|
||||
|
||||
if path != "" {
|
||||
t.Fatalf("expected empty path, but '%s' was returned", path)
|
||||
}
|
||||
}
|
||||
|
||||
// Reading a symlink to a file must fail
|
||||
func TestReadSymlinkedDirectoryToFile(t *testing.T) {
|
||||
var err error
|
||||
var file *os.File
|
||||
|
||||
if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil {
|
||||
t.Fatalf("failed to create file: %s", err)
|
||||
}
|
||||
|
||||
file.Close()
|
||||
|
||||
if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil {
|
||||
t.Errorf("failed to create symlink: %s", err)
|
||||
}
|
||||
|
||||
var path string
|
||||
if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil {
|
||||
t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed")
|
||||
}
|
||||
|
||||
if path != "" {
|
||||
t.Fatalf("path should've been empty: %s", path)
|
||||
}
|
||||
|
||||
if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil {
|
||||
t.Errorf("failed to remove file: %s", err)
|
||||
}
|
||||
|
||||
if err = os.Remove("/tmp/fileLinkTest"); err != nil {
|
||||
t.Errorf("failed to remove symlink: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWildcardMatches(t *testing.T) {
|
||||
match, _ := Matches("fileutils.go", []string{"*"})
|
||||
if match != true {
|
||||
t.Errorf("failed to get a wildcard match, got %v", match)
|
||||
}
|
||||
}
|
||||
|
||||
// A simple pattern match should return true.
|
||||
func TestPatternMatches(t *testing.T) {
|
||||
match, _ := Matches("fileutils.go", []string{"*.go"})
|
||||
if match != true {
|
||||
t.Errorf("failed to get a match, got %v", match)
|
||||
}
|
||||
}
|
||||
|
||||
// An exclusion followed by an inclusion should return true.
|
||||
func TestExclusionPatternMatchesPatternBefore(t *testing.T) {
|
||||
match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"})
|
||||
if match != true {
|
||||
t.Errorf("failed to get true match on exclusion pattern, got %v", match)
|
||||
}
|
||||
}
|
||||
|
||||
// A folder pattern followed by an exception should return false.
|
||||
func TestPatternMatchesFolderExclusions(t *testing.T) {
|
||||
match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"})
|
||||
if match != false {
|
||||
t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
|
||||
}
|
||||
}
|
||||
|
||||
// A folder pattern followed by an exception should return false.
|
||||
func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) {
|
||||
match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"})
|
||||
if match != false {
|
||||
t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
|
||||
}
|
||||
}
|
||||
|
||||
// A folder pattern followed by an exception should return false.
|
||||
func TestPatternMatchesFolderWildcardExclusions(t *testing.T) {
|
||||
match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"})
|
||||
if match != false {
|
||||
t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
|
||||
}
|
||||
}
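
// exampleMatches is a hedged illustration, not part of the vendored tests: it
// restates the rule the cases above and below check, namely that a "!" pattern
// is an exception, so a file caught by "docs/*" is re-included by a later
// "!docs/README.md" and Matches reports false (not excluded).
func exampleMatches() bool {
    excluded, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"})
    return excluded // false
}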
|
||||
|
||||
// A pattern followed by an exclusion should return false.
|
||||
func TestExclusionPatternMatchesPatternAfter(t *testing.T) {
|
||||
match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"})
|
||||
if match != false {
|
||||
t.Errorf("failed to get false match on exclusion pattern, got %v", match)
|
||||
}
|
||||
}
|
||||
|
||||
// A filename evaluating to . should return false.
|
||||
func TestExclusionPatternMatchesWholeDirectory(t *testing.T) {
|
||||
match, _ := Matches(".", []string{"*.go"})
|
||||
if match != false {
|
||||
t.Errorf("failed to get false match on ., got %v", match)
|
||||
}
|
||||
}
|
||||
|
||||
// A single ! pattern should return an error.
|
||||
func TestSingleExclamationError(t *testing.T) {
|
||||
_, err := Matches("fileutils.go", []string{"!"})
|
||||
if err == nil {
|
||||
t.Errorf("failed to get an error for a single exclamation point, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// A string preceded with a ! should return true from Exclusion.
|
||||
func TestExclusion(t *testing.T) {
|
||||
exclusion := exclusion("!")
|
||||
if !exclusion {
|
||||
t.Errorf("failed to get true for a single !, got %v", exclusion)
|
||||
}
|
||||
}
|
||||
|
||||
// Matches with no patterns
|
||||
func TestMatchesWithNoPatterns(t *testing.T) {
|
||||
matches, err := Matches("/any/path/there", []string{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if matches {
|
||||
t.Fatalf("Should not have match anything")
|
||||
}
|
||||
}
|
||||
|
||||
// Matches with malformed patterns
|
||||
func TestMatchesWithMalformedPatterns(t *testing.T) {
|
||||
matches, err := Matches("/any/path/there", []string{"["})
|
||||
if err == nil {
|
||||
t.Fatal("Should have failed because of a malformed syntax in the pattern")
|
||||
}
|
||||
if matches {
|
||||
t.Fatalf("Should not have match anything")
|
||||
}
|
||||
}
|
||||
|
||||
// An empty string should return true from Empty.
|
||||
func TestEmpty(t *testing.T) {
|
||||
empty := empty("")
|
||||
if !empty {
|
||||
t.Errorf("failed to get true for an empty string, got %v", empty)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanPatterns(t *testing.T) {
|
||||
cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"})
|
||||
if len(cleaned) != 2 {
|
||||
t.Errorf("expected 2 element slice, got %v", len(cleaned))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanPatternsStripEmptyPatterns(t *testing.T) {
|
||||
cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""})
|
||||
if len(cleaned) != 2 {
|
||||
t.Errorf("expected 2 element slice, got %v", len(cleaned))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanPatternsExceptionFlag(t *testing.T) {
|
||||
_, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"})
|
||||
if !exceptions {
|
||||
t.Errorf("expected exceptions to be true, got %v", exceptions)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) {
|
||||
_, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"})
|
||||
if !exceptions {
|
||||
t.Errorf("expected exceptions to be true, got %v", exceptions)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) {
|
||||
_, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "})
|
||||
if !exceptions {
|
||||
t.Errorf("expected exceptions to be true, got %v", exceptions)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanPatternsErrorSingleException(t *testing.T) {
|
||||
_, _, _, err := CleanPatterns([]string{"!"})
|
||||
if err == nil {
|
||||
t.Errorf("expected error on single exclamation point, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCleanPatternsFolderSplit(t *testing.T) {
|
||||
_, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"})
|
||||
if dirs[0][0] != "docs" {
|
||||
t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1])
|
||||
}
|
||||
if dirs[0][1] != "config" {
|
||||
t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1])
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateIfNotExistsDir(t *testing.T) {
|
||||
tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tempFolder)
|
||||
|
||||
folderToCreate := filepath.Join(tempFolder, "tocreate")
|
||||
|
||||
if err := CreateIfNotExists(folderToCreate, true); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fileinfo, err := os.Stat(folderToCreate)
|
||||
if err != nil {
|
||||
t.Fatalf("Should have create a folder, got %v", err)
|
||||
}
|
||||
|
||||
if !fileinfo.IsDir() {
|
||||
t.Fatalf("Should have been a dir, seems it's not")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateIfNotExistsFile(t *testing.T) {
|
||||
tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tempFolder)
|
||||
|
||||
fileToCreate := filepath.Join(tempFolder, "file/to/create")
|
||||
|
||||
if err := CreateIfNotExists(fileToCreate, false); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
fileinfo, err := os.Stat(fileToCreate)
|
||||
if err != nil {
|
||||
t.Fatalf("Should have create a file, got %v", err)
|
||||
}
|
||||
|
||||
if fileinfo.IsDir() {
|
||||
t.Fatalf("Should have been a file, seems it's not")
|
||||
}
|
||||
}
|
22 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_unix.go generated vendored Normal file

@@ -0,0 +1,22 @@
// +build linux freebsd

package fileutils

import (
    "fmt"
    "io/ioutil"
    "os"

    "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
)

// GetTotalUsedFds Returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
    if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
        logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
    } else {
        return len(fds)
    }
    return -1
}
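
// logOpenFdCount is a hedged usage sketch, not part of the vendored file:
// GetTotalUsedFds counts the entries of /proc/<pid>/fd, so it only returns a
// real count where procfs is mounted; otherwise it logs an error and returns
// -1, which callers should treat as "unknown" rather than zero.
func logOpenFdCount() {
    if n := GetTotalUsedFds(); n >= 0 {
        logrus.Debugf("process currently holds %d open file descriptors", n)
    }
}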
|
Some files were not shown because too many files have changed in this diff.