package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"os"
	"path/filepath"
	"sync"
	"syscall"
	"time"

	gocontext "context"

	contentapi "github.com/containerd/containerd/api/services/content"
	"github.com/containerd/containerd/api/services/execution"
	imagesapi "github.com/containerd/containerd/api/services/images"
	rootfsapi "github.com/containerd/containerd/api/services/rootfs"
	"github.com/containerd/containerd/api/types/container"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	contentservice "github.com/containerd/containerd/services/content"
	imagesservice "github.com/containerd/containerd/services/images"
	"github.com/pkg/errors"
	"github.com/tonistiigi/fifo"
	"github.com/urfave/cli"
	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"
)

var grpcConn *grpc.ClientConn
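
// prepareStdio connects the supplied stdin, stdout and stderr fifo paths to
// the current process's standard streams. The returned WaitGroup completes
// once the output copiers have drained.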
func prepareStdio(stdin, stdout, stderr string, console bool) (*sync.WaitGroup, error) {
	var wg sync.WaitGroup
	ctx := gocontext.Background()

	// stdin: copy the caller's stdin into the write end of the stdin fifo.
	f, err := fifo.OpenFifo(ctx, stdin, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700)
	if err != nil {
		return nil, err
	}
	// close the fifo again if a later step fails.
	defer func(c io.Closer) {
		if err != nil {
			c.Close()
		}
	}(f)
	go func(w io.WriteCloser) {
		io.Copy(w, os.Stdin)
		w.Close()
	}(f)

	// stdout: stream the stdout fifo to our stdout; the copy is tracked by the
	// returned WaitGroup.
	f, err = fifo.OpenFifo(ctx, stdout, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700)
	if err != nil {
		return nil, err
	}
	defer func(c io.Closer) {
		if err != nil {
			c.Close()
		}
	}(f)
	wg.Add(1)
	go func(r io.ReadCloser) {
		io.Copy(os.Stdout, r)
		r.Close()
		wg.Done()
	}(f)

	// stderr: only consumed here when no console is attached.
	f, err = fifo.OpenFifo(ctx, stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700)
	if err != nil {
		return nil, err
	}
	defer func(c io.Closer) {
		if err != nil {
			c.Close()
		}
	}(f)
	if !console {
		wg.Add(1)
		go func(r io.ReadCloser) {
			io.Copy(os.Stderr, r)
			r.Close()
			wg.Done()
		}(f)
	}

	return &wg, nil
}
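
// getGRPCConnection dials the containerd GRPC endpoint on the unix socket
// named by the global "socket" flag, caching the connection for reuse.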
func getGRPCConnection(context *cli.Context) (*grpc.ClientConn, error) {
	if grpcConn != nil {
		return grpcConn, nil
	}

	bindSocket := context.GlobalString("socket")
	// silence the grpc logger (send it to ioutil.Discard) so that it does not
	// interfere with our stdio
	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
	dialOpts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithTimeout(100 * time.Second)}
	dialOpts = append(dialOpts,
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", bindSocket, timeout)
		},
		))

	conn, err := grpc.Dial(fmt.Sprintf("unix://%s", bindSocket), dialOpts...)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to dial %q", bindSocket)
	}

	grpcConn = conn
	return grpcConn, nil
}
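
// getExecutionService returns a client for the container execution service
// backed by the shared GRPC connection.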
func getExecutionService(context *cli.Context) (execution.ContainerServiceClient, error) {
	conn, err := getGRPCConnection(context)
	if err != nil {
		return nil, err
	}
	return execution.NewContainerServiceClient(conn), nil
}
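
// getContentProvider returns a content.Provider backed by the remote content
// service on the shared GRPC connection.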
func getContentProvider(context *cli.Context) (content.Provider, error) {
	conn, err := getGRPCConnection(context)
	if err != nil {
		return nil, err
	}
	return contentservice.NewProviderFromClient(contentapi.NewContentClient(conn)), nil
}
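
// getRootFSService returns a client for the rootfs service on the shared GRPC
// connection.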
func getRootFSService(context *cli.Context) (rootfsapi.RootFSClient, error) {
	conn, err := getGRPCConnection(context)
	if err != nil {
		return nil, err
	}
	return rootfsapi.NewRootFSClient(conn), nil
}
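
// getImageStore returns an image metadata store backed by the remote images
// service on the shared GRPC connection.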
func getImageStore(clicontext *cli.Context) (images.Store, error) {
	conn, err := getGRPCConnection(clicontext)
	if err != nil {
		return nil, err
	}
	return imagesservice.NewStoreFromClient(imagesapi.NewImagesClient(conn)), nil
}
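
// getTempDir creates a per-invocation temporary directory for the given id
// under $TMPDIR/ctr.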
func getTempDir(id string) (string, error) {
	err := os.MkdirAll(filepath.Join(os.TempDir(), "ctr"), 0700)
	if err != nil {
		return "", err
	}
	tmpDir, err := ioutil.TempDir(filepath.Join(os.TempDir(), "ctr"), fmt.Sprintf("%s-", id))
	if err != nil {
		return "", err
	}
	return tmpDir, nil
}
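
// waitContainer blocks on the events stream until it sees an EXIT event whose
// container ID and PID match the created container, then returns its exit
// status. 255 is returned if the stream fails before that event arrives.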
func waitContainer(events execution.ContainerService_EventsClient, response *execution.CreateResponse) (uint32, error) {
	for {
		e, err := events.Recv()
		if err != nil {
			return 255, err
		}
		if e.Type != container.Event_EXIT {
			continue
		}
		if e.ID == response.ID &&
			e.Pid == response.Pid {
			return e.ExitStatus, nil
		}
	}
}