This article collects typical usage examples of the Client class from the Golang package github.com/docker/engine-api/client. If you are wondering what the Client class is for, how to use it, or what real-world usage looks like, the curated examples below should help.
The following shows 15 code examples of the Client class, sorted by popularity by default.
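All of the examples below take an already-constructed *client.Client. As a minimal sketch (assuming the environment-based constructor NewEnvClient provided by the engine-api client package), a client could be created like this before calling any of the helpers shown below:

package main

import (
    "log"

    "github.com/docker/engine-api/client"
)

func main() {
    // NewEnvClient configures the client from the standard DOCKER_* environment
    // variables (DOCKER_HOST, DOCKER_CERT_PATH, DOCKER_TLS_VERIFY, ...).
    cli, err := client.NewEnvClient()
    if err != nil {
        log.Fatalf("failed to create docker client: %v", err)
    }
    _ = cli // pass this *client.Client into the example functions below
}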
Example 1: getIntNames
func getIntNames(netID string, docker *dockerclient.Client) (*intNames, error) {
    net, err := docker.NetworkInspect(context.Background(), netID)
    if err != nil {
        log.Errorf("Error getting networks: %v", err)
        return nil, err
    }
    names := &intNames{}
    if net.Driver != "vxlan" {
        log.Errorf("Network %v is not a vxlan network", netID)
        return nil, errors.New("Not a vxlan network")
    }
    names.VxlanName = "vx_" + netID[:12]
    // get interface names from options first
    for k, v := range net.Options {
        if k == "vxlanName" {
            names.VxlanName = v
        }
    }
    return names, nil
}
Example 2: FindOnionIPAddress
// FindOnionIPAddress finds the IP address of a target container that is connected
// to the given network. This IP address is accessible from any other container
// connected to the same network.
func FindOnionIPAddress(cli *client.Client, target, network string) (string, error) {
    inspect, err := cli.ContainerInspect(target)
    if err != nil {
        return "", err
    }
    endpoint, ok := inspect.NetworkSettings.Networks[network]
    if !ok {
        return "", fmt.Errorf("inspect container: container '%s' not connected to network '%s'", target, network)
    }
    return endpoint.IPAddress, nil
}
Example 3: volumeVmdkExists
// volumeVmdkExists returns the volume if it exists, or nil if it is not found
// (NOT_FOUND). It still fails the test if the driver for this volume is not vmdk.
func volumeVmdkExists(t *testing.T, c *client.Client, vol string) *types.Volume {
    reply, err := c.VolumeList(context.Background(), filters.Args{})
    if err != nil {
        t.Fatalf("Failed to enumerate volumes: %v", err)
    }
    for _, v := range reply.Volumes {
        // t.Log(v.Name, v.Driver, v.Mountpoint)
        if v.Name == vol {
            return v
        }
    }
    return nil
}
Example 4: getGateway
func getGateway(netID string, docker dockerclient.Client) (string, error) {
    net, err := docker.NetworkInspect(context.Background(), netID)
    if err != nil {
        log.Errorf("Error inspecting network: %v", err)
        return "", err
    }
    for _, config := range net.IPAM.Config {
        if config.Gateway != "" {
            return config.Gateway, nil
        }
    }
    return "", nil
}
Example 5: discoverAndSync
// discoverAndSync enumerates volumes and builds refCountsMap, then syncs with mount info
func (r refCountsMap) discoverAndSync(c *client.Client, d *vmdkDriver) error {
    // We assume refcounts start out empty. Let's enforce that.
    for name := range r {
        delete(r, name)
    }
    filters := filters.NewArgs()
    filters.Add("status", "running")
    filters.Add("status", "paused")
    filters.Add("status", "restarting")
    containers, err := c.ContainerList(context.Background(), types.ContainerListOptions{
        All:    true,
        Filter: filters,
    })
    if err != nil {
        return err
    }
    log.Debugf("Found %d running or paused containers", len(containers))
    for _, ct := range containers {
        containerJSONInfo, err := c.ContainerInspect(context.Background(), ct.ID)
        if err != nil {
            log.Errorf("ContainerInspect failed for %s (err: %v)", ct.Names, err)
            continue
        }
        log.Debugf(" Mounts for %v", ct.Names)
        for _, mount := range containerJSONInfo.Mounts {
            if mount.Driver == driverName {
                r.incr(mount.Name)
                log.Debugf(" name=%v (driver=%s source=%s)",
                    mount.Name, mount.Driver, mount.Source)
            }
        }
    }
    // Check that the refcounts match the actual mount info from Linux.
    // If they don't, unmount unneeded volumes, or complain if something is
    // not mounted but should be (that's an error; we should not get there).
    r.getMountInfo()
    r.syncMountsWithRefCounters(d)
    return nil
}
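The snippet above relies on a refCountsMap with an incr method (plus getMountInfo and syncMountsWithRefCounters helpers that are not shown). A minimal, hypothetical shape for the counting part, not the actual vmdk plugin code, might look like this:

// refCount tracks how many running containers reference a single volume.
type refCount struct {
    count int
}

// refCountsMap maps a volume name to its reference count.
type refCountsMap map[string]*refCount

// incr bumps the reference count for a volume, creating the entry on first use.
func (r refCountsMap) incr(name string) int {
    rc, ok := r[name]
    if !ok {
        rc = &refCount{}
        r[name] = rc
    }
    rc.count++
    return rc.count
}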
Example 6: FindTargetPorts
// FindTargetPorts finds the set of ports EXPOSE'd on the target container. This
// includes non-TCP ports, so callers should make sure they exclude protocols
// not supported by Tor.
func FindTargetPorts(cli *client.Client, target string) ([]nat.Port, error) {
    inspect, err := cli.ContainerInspect(target)
    if err != nil {
        return nil, err
    }
    // Make sure we don't dereference nils.
    if inspect.NetworkSettings == nil || inspect.NetworkSettings.Ports == nil {
        return nil, fmt.Errorf("inspect container: network settings not available")
    }
    // Get keys from map.
    var ports []nat.Port
    for port := range inspect.NetworkSettings.Ports {
        ports = append(ports, port)
    }
    return ports, nil
}
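As the doc comment warns, the returned slice can include non-TCP ports. A small, hypothetical filter (using the Proto accessor on nat.Port from the go-connections nat package) that keeps only the ports Tor can forward might look like this:

// keepTCPPorts drops every port whose protocol is not TCP, since Tor hidden
// services can only forward TCP. Hypothetical helper, not part of the example above.
func keepTCPPorts(ports []nat.Port) []nat.Port {
    var tcp []nat.Port
    for _, port := range ports {
        if port.Proto() == "tcp" {
            tcp = append(tcp, port)
        }
    }
    return tcp
}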
Example 7: CreateOnionNetwork
// CreateOnionNetwork creates a new bridge network with a random (but recognisable)
// name. If it can't create a name after XXX attempts, it will return an error.
func CreateOnionNetwork(cli *client.Client, ident string) (string, error) {
    options := types.NetworkCreate{
        Name:           ident,
        CheckDuplicate: true,
        Driver:         "bridge",
    }
    resp, err := cli.NetworkCreate(options)
    if err != nil {
        // TODO: Retry if we get "already exists".
        return "", err
    }
    if resp.Warning != "" {
        log.Warn(resp.Warning)
    }
    return ident, nil
}
Example 8: runContainerCmd
// runContainerCmd runs a command in a container, with the volume mounted, and
// returns the completion code.
// It exits (t.Fatal()) on create/start/wait errors.
func runContainerCmd(t *testing.T, client *client.Client, volumeName string,
    image string, cmd *strslice.StrSlice, addr string) int {
    mountPoint := getMountpoint(volumeName)
    bind := volumeName + ":" + mountPoint
    t.Logf("Running cmd=%v with vol=%s on client %s", cmd, volumeName, addr)
    r, err := client.ContainerCreate(context.Background(),
        &container.Config{Image: image, Cmd: *cmd,
            Volumes: map[string]struct{}{mountPoint: {}}},
        &container.HostConfig{Binds: []string{bind}}, nil, "")
    if err != nil {
        t.Fatalf("\tContainer create failed: %v", err)
    }
    err = client.ContainerStart(context.Background(), r.ID,
        types.ContainerStartOptions{})
    if err != nil {
        t.Fatalf("\tContainer start failed: id=%s, err %v", r.ID, err)
    }
    code, err := client.ContainerWait(context.Background(), r.ID)
    if err != nil {
        t.Fatalf("\tContainer wait failed: id=%s, err %v", r.ID, err)
    }
    if !removeContainers {
        t.Logf("\tSkipping container removal, id=%s (removeContainers == false)",
            r.ID)
        return code
    }
    err = client.ContainerRemove(context.Background(), r.ID,
        types.ContainerRemoveOptions{
            RemoveVolumes: true,
            Force:         true,
        })
    if err != nil {
        t.Fatalf("\nContainer removal failed: %v", err)
    }
    return code
}
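A hypothetical test using runContainerCmd could look like the sketch below; the image, volume name, command, path and address are placeholders, and the client is assumed to come from client.NewEnvClient():

func TestRunSimpleCommand(t *testing.T) {
    cli, err := client.NewEnvClient()
    if err != nil {
        t.Fatalf("failed to create docker client: %v", err)
    }
    // Placeholder command; the real path under the mount point depends on getMountpoint().
    cmd := strslice.StrSlice{"touch", "/mnt/testvol/file.txt"}
    code := runContainerCmd(t, cli, "testvol", "busybox", &cmd, "unix:///var/run/docker.sock")
    if code != 0 {
        t.Errorf("container exited with code %d", code)
    }
}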
Example 9: newKubeDockerClient
// newKubeDockerClient creates a kubeDockerClient from an existing docker client. If requestTimeout is 0,
// defaultTimeout will be applied.
func newKubeDockerClient(dockerClient *dockerapi.Client, requestTimeout time.Duration) DockerInterface {
    if requestTimeout == 0 {
        requestTimeout = defaultTimeout
    }
    k := &kubeDockerClient{
        client:  dockerClient,
        timeout: requestTimeout,
    }
    // Notice that this assumes that docker is running before kubelet is started.
    v, err := k.Version()
    if err != nil {
        glog.Errorf("failed to retrieve docker version: %v", err)
        glog.Warningf("Using empty version for docker client, this may sometimes cause compatibility issue.")
    } else {
        // Update client version with real api version.
        dockerClient.UpdateClientVersion(v.APIVersion)
    }
    return k
}
Example 10: runTorContainer
func runTorContainer(cli *client.Client, ident, imageID, network string) (string, error) {
    config := &types.ContainerCreateConfig{
        Name: ident,
        Config: &containerTypes.Config{
            Image: imageID,
        },
    }
    resp, err := cli.ContainerCreate(config.Config, config.HostConfig, config.NetworkingConfig, config.Name)
    if err != nil {
        return "", err
    }
    // TODO: Remove container on failure.
    for _, warning := range resp.Warnings {
        log.Warn(warning)
    }
    if err := cli.ContainerStart(resp.ID); err != nil {
        return "", err
    }
    // Connect to the network.
    if err := cli.NetworkConnect(network, resp.ID, nil); err != nil {
        return "", err
    }
    return resp.ID, err
}
Example 11: GetOnionHostname
func GetOnionHostname(cli *client.Client, containerID string) (string, error) {
    content, stat, err := cli.CopyFromContainer(containerID, HostnamePath)
    // XXX: This isn't very pretty. But we need to wait until Tor generates
    //      an .onion address, and there's not really any better way of
    //      doing it.
    for err != nil && strings.Contains(err.Error(), "no such file or directory") {
        // Make sure the container hasn't died.
        if inspect, err := cli.ContainerInspect(containerID); err != nil {
            return "", fmt.Errorf("error inspecting container: %s", err)
        } else if !isRunning(inspect.State) {
            return "", fmt.Errorf("container died before the hostname was computed")
        }
        log.Warnf("tor onion hostname not found in container, retrying after a short nap...")
        time.Sleep(500 * time.Millisecond)
        content, stat, err = cli.CopyFromContainer(containerID, HostnamePath)
    }
    if err != nil {
        return "", err
    }
    defer content.Close()
    if stat.Mode.IsDir() {
        return "", fmt.Errorf("hostname file is a directory")
    }
    tr := tar.NewReader(content)
    hdr, err := tr.Next()
    for err != io.EOF {
        if err != nil {
            break
        }
        // XXX: Maybe do filepath.Base()?
        if hdr.Name != "hostname" {
            // Advance to the next tar entry; otherwise this loop would never terminate.
            hdr, err = tr.Next()
            continue
        }
        data, err := ioutil.ReadAll(tr)
        if err != nil {
            return "", err
        }
        hostname := string(data)
        return strings.TrimSpace(hostname), nil
    }
    return "", fmt.Errorf("hostname file not in copied archive")
}
Example 12: PurgeOnionNetwork
// PurgeOnionNetwork purges an onion network, disconnecting all containers from
// it. We assume that nobody is adding containers to this network.
func PurgeOnionNetwork(cli *client.Client, network string) error {
    inspect, err := cli.NetworkInspect(network)
    if err != nil {
        return err
    }
    for container := range inspect.Containers {
        log.Infof("purge network %s: disconnecting container %s", network, container)
        if err := cli.NetworkDisconnect(network, container, true); err != nil {
            return err
        }
    }
    return cli.NetworkRemove(network)
}
Example 13: newDockerContainerHandler
// newDockerContainerHandler returns a new container.ContainerHandler
func newDockerContainerHandler(
    client *docker.Client,
    name string,
    machineInfoFactory info.MachineInfoFactory,
    fsInfo fs.FsInfo,
    storageDriver storageDriver,
    storageDir string,
    cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
    inHostNamespace bool,
    metadataEnvs []string,
    dockerVersion []int,
    ignoreMetrics container.MetricSet,
    thinPoolWatcher *devicemapper.ThinPoolWatcher,
) (container.ContainerHandler, error) {
    // Create the cgroup paths.
    cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
    for key, val := range cgroupSubsystems.MountPoints {
        cgroupPaths[key] = path.Join(val, name)
    }
    // Generate the equivalent cgroup manager for this container.
    cgroupManager := &cgroupfs.Manager{
        Cgroups: &libcontainerconfigs.Cgroup{
            Name: name,
        },
        Paths: cgroupPaths,
    }
    rootFs := "/"
    if !inHostNamespace {
        rootFs = "/rootfs"
        storageDir = path.Join(rootFs, storageDir)
    }
    id := ContainerNameToDockerId(name)
    // Add the Containers dir where the log files are stored.
    // FIXME: Give `otherStorageDir` a more descriptive name.
    otherStorageDir := path.Join(storageDir, pathToContainersDir, id)
    rwLayerID, err := getRwLayerID(id, storageDir, storageDriver, dockerVersion)
    if err != nil {
        return nil, err
    }
    // Determine the rootfs storage dir OR the pool name to determine the device
    var (
        rootfsStorageDir string
        poolName         string
    )
    switch storageDriver {
    case aufsStorageDriver:
        rootfsStorageDir = path.Join(storageDir, string(aufsStorageDriver), aufsRWLayer, rwLayerID)
    case overlayStorageDriver:
        rootfsStorageDir = path.Join(storageDir, string(overlayStorageDriver), rwLayerID)
    case devicemapperStorageDriver:
        status, err := Status()
        if err != nil {
            return nil, fmt.Errorf("unable to determine docker status: %v", err)
        }
        poolName = status.DriverStatus[dockerutil.DriverStatusPoolName]
    }
    // TODO: extract object mother method
    handler := &dockerContainerHandler{
        id:                 id,
        client:             client,
        name:               name,
        machineInfoFactory: machineInfoFactory,
        cgroupPaths:        cgroupPaths,
        cgroupManager:      cgroupManager,
        storageDriver:      storageDriver,
        fsInfo:             fsInfo,
        rootFs:             rootFs,
        poolName:           poolName,
        rootfsStorageDir:   rootfsStorageDir,
        envs:               make(map[string]string),
        ignoreMetrics:      ignoreMetrics,
        thinPoolWatcher:    thinPoolWatcher,
    }
    // We assume that if Inspect fails then the container is not known to docker.
    ctnr, err := client.ContainerInspect(context.Background(), id)
    if err != nil {
        return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
    }
    // Timestamp returned by Docker is in time.RFC3339Nano format.
    handler.creationTime, err = time.Parse(time.RFC3339Nano, ctnr.Created)
    if err != nil {
        // This should not happen, report the error just in case
        return nil, fmt.Errorf("failed to parse the create timestamp %q for container %q: %v", ctnr.Created, id, err)
    }
    handler.pid = ctnr.State.Pid
    // Add the name and bare ID as aliases of the container.
    handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/"), id)
    handler.labels = ctnr.Config.Labels
    handler.image = ctnr.Config.Image
//......... (remainder of this example omitted) .........
Example 14: buildTorImage
func buildTorImage(cli *client.Client, ctx io.Reader) (string, error) {
    // XXX: There's currently no way to get the image ID of a build without
    //      manually parsing the output, or tagging the image. Since I'm not in
    //      the mood for the former, we can tag the build with a random name.
    //      Unfortunately, untagging of images isn't supported, so we'll have to
    //      use a name that allows us to not pollute the host.
    options := types.ImageBuildOptions{
        // XXX: If we SuppressOutput we can get just the image ID, but we lose
        //      being able to tell users what the status of the build is.
        //SuppressOutput: true,
        Tags:        []string{MkonionTag},
        Remove:      true,
        ForceRemove: true,
        Dockerfile:  "Dockerfile",
        Context:     ctx,
    }
    build, err := cli.ImageBuild(options)
    if err != nil {
        return "", err
    }
    // XXX: For some weird reason, at this point the build has not finished. We
    //      need to wait for build.Body to be closed. We might as well tell the
    //      user what the status of the build is.
    log.Infof("building %s", MkonionTag)
    dec := json.NewDecoder(build.Body)
    for {
        // Modified from pkg/jsonmessage in Docker.
        type JSONMessage struct {
            Stream string `json:"stream,omitempty"`
            Status string `json:"status,omitempty"`
        }
        // Decode the JSONMessages.
        var jm JSONMessage
        if err := dec.Decode(&jm); err != nil {
            if err == io.EOF {
                break
            }
            return "", err
        }
        jm.Stream = strings.TrimSpace(jm.Stream)
        jm.Status = strings.TrimSpace(jm.Status)
        // Log the status.
        if jm.Stream != "" {
            log.Info(jm.Stream)
        }
        if jm.Status != "" {
            log.Info(jm.Status)
        }
    }
    inspect, _, err := cli.ImageInspectWithRaw(MkonionTag, false)
    if err != nil {
        // XXX: Should probably clean up the built image here?
        return "", err
    }
    log.Infof("successfully built %s image", MkonionTag)
    return inspect.ID, nil
}
Example 15: ConnectOnionNetwork
// ConnectOnionNetwork connects a target container to the onion network, allowing
// the container to be accessed by the Tor relay container.
func ConnectOnionNetwork(cli *client.Client, target, network string) error {
    // XXX: Should configure this to use a subnet like 10.x.x.x.
    options := &networkTypes.EndpointSettings{}
    return cli.NetworkConnect(network, target, options)
}
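Taken together, the onion-network helpers shown in the examples above compose into a simple flow. A hypothetical sketch built only from those helpers ("mkonion-demo" is a placeholder network name, and error handling is kept minimal) might look like this:

// attachTargetToOnionNetwork creates a bridge network, connects the target
// container to it, and returns the IP address the Tor relay can reach it on.
// Hypothetical glue code assembled from the helpers shown above.
func attachTargetToOnionNetwork(cli *client.Client, target string) (string, error) {
    network, err := CreateOnionNetwork(cli, "mkonion-demo")
    if err != nil {
        return "", err
    }
    if err := ConnectOnionNetwork(cli, target, network); err != nil {
        return "", err
    }
    // The target is now reachable from any other container on the network.
    return FindOnionIPAddress(cli, target, network)
}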