commit 5c94544fb8
4 changed files with 212 additions and 1 deletion

@@ -21,6 +21,7 @@ var containerCommand = cli.Command{
		removeContainerCommand,
		containerStatusCommand,
		listContainersCommand,
		execSyncCommand,
	},
}

@@ -203,6 +204,38 @@ var containerStatusCommand = cli.Command{
	},
}

var execSyncCommand = cli.Command{
	Name:  "execsync",
	Usage: "exec a command synchronously in a container",
	Flags: []cli.Flag{
		cli.StringFlag{
			Name:  "id",
			Value: "",
			Usage: "id of the container",
		},
		cli.Int64Flag{
			Name:  "timeout",
			Value: 0,
			Usage: "timeout for the command",
		},
	},
	Action: func(context *cli.Context) error {
		// Set up a connection to the server.
		conn, err := getClientConnection(context)
		if err != nil {
			return fmt.Errorf("failed to connect: %v", err)
		}
		defer conn.Close()
		client := pb.NewRuntimeServiceClient(conn)

		err = ExecSync(client, context.String("id"), context.Args(), context.Int64("timeout"))
		if err != nil {
			return fmt.Errorf("execing command in container failed: %v", err)
		}
		return nil
	},
}

type listOptions struct {
	// id of the container
	id string

@@ -394,6 +427,29 @@ func ContainerStatus(client pb.RuntimeServiceClient, ID string) error {
	return nil
}

// ExecSync sends an ExecSyncRequest to the server, and parses
// the returned ExecSyncResponse.
func ExecSync(client pb.RuntimeServiceClient, ID string, cmd []string, timeout int64) error {
	if ID == "" {
		return fmt.Errorf("ID cannot be empty")
	}
	r, err := client.ExecSync(context.Background(), &pb.ExecSyncRequest{
		ContainerId: &ID,
		Cmd:         cmd,
		Timeout:     &timeout,
	})
	if err != nil {
		return err
	}
	fmt.Println("Stdout:")
	fmt.Println(string(r.Stdout))
	fmt.Println("Stderr:")
	fmt.Println(string(r.Stderr))
	fmt.Printf("Exit code: %v\n", *r.ExitCode)

	return nil
}

// ListContainers sends a ListContainerRequest to the server, and parses
// the returned ListContainerResponse.
func ListContainers(client pb.RuntimeServiceClient, opts listOptions) error {
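The ExecSync helper above prints *r.ExitCode without checking for nil; the generated CRI response types expose scalar fields as pointers. Below is a minimal standalone sketch of a guarded read, where execSyncResult is a hypothetical stand-in for the generated pb.ExecSyncResponse, not the real type:

package main

import "fmt"

// execSyncResult is a hypothetical stand-in for the generated
// pb.ExecSyncResponse, whose scalar fields are pointers.
type execSyncResult struct {
	Stdout   []byte
	Stderr   []byte
	ExitCode *int32
}

func main() {
	code := int32(0)
	r := &execSyncResult{Stdout: []byte("HELLO\n"), ExitCode: &code}

	fmt.Println("Stdout:")
	fmt.Println(string(r.Stdout))
	fmt.Println("Stderr:")
	fmt.Println(string(r.Stderr))
	// Guard the pointer before dereferencing, unlike the helper above,
	// which assumes the server always sets ExitCode.
	if r.ExitCode != nil {
		fmt.Printf("Exit code: %v\n", *r.ExitCode)
	}
}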
oci/oci.go (+94)
@@ -148,6 +148,100 @@ func (r *Runtime) StartContainer(c *Container) error {
	return nil
}

// ExecSyncResponse is returned from ExecSync.
type ExecSyncResponse struct {
	Stdout   []byte
	Stderr   []byte
	ExitCode int32
}

// ExecSync execs a command in a container and returns its stdout, stderr and return code.
func (r *Runtime) ExecSync(c *Container, command []string, timeout int64) (resp *ExecSyncResponse, err error) {
	args := []string{"exec", c.name}
	args = append(args, command...)
	cmd := exec.Command(r.Path(), args...)
	var stdoutBuf, stderrBuf bytes.Buffer
	cmd.Stdout = &stdoutBuf
	cmd.Stderr = &stderrBuf
	err = cmd.Start()
	if err != nil {
		return &ExecSyncResponse{
			Stdout:   stdoutBuf.Bytes(),
			Stderr:   stderrBuf.Bytes(),
			ExitCode: -1,
		}, err
	}

	if timeout > 0 {
		done := make(chan error, 1)
		go func() {
			done <- cmd.Wait()
		}()

		select {
		case <-time.After(time.Duration(timeout) * time.Second):
			err = unix.Kill(cmd.Process.Pid, syscall.SIGKILL)
			if err != nil && err != syscall.ESRCH {
				return &ExecSyncResponse{
					Stdout:   stdoutBuf.Bytes(),
					Stderr:   stderrBuf.Bytes(),
					ExitCode: -1,
				}, fmt.Errorf("failed to kill process on timeout: %v", err)
			}
			return &ExecSyncResponse{
				Stdout:   stdoutBuf.Bytes(),
				Stderr:   stderrBuf.Bytes(),
				ExitCode: -1,
			}, fmt.Errorf("command timed out")
		case err = <-done:
			if err != nil {
				if exitErr, ok := err.(*exec.ExitError); ok {
					if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
						return &ExecSyncResponse{
							Stdout:   stdoutBuf.Bytes(),
							Stderr:   stderrBuf.Bytes(),
							ExitCode: int32(status.ExitStatus()),
						}, err
					}
				} else {
					return &ExecSyncResponse{
						Stdout:   stdoutBuf.Bytes(),
						Stderr:   stderrBuf.Bytes(),
						ExitCode: -1,
					}, err
				}
			}
		}
	} else {
		err = cmd.Wait()
		if err != nil {
			if exitErr, ok := err.(*exec.ExitError); ok {
				if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
					return &ExecSyncResponse{
						Stdout:   stdoutBuf.Bytes(),
						Stderr:   stderrBuf.Bytes(),
						ExitCode: int32(status.ExitStatus()),
					}, err
				}
			} else {
				return &ExecSyncResponse{
					Stdout:   stdoutBuf.Bytes(),
					Stderr:   stderrBuf.Bytes(),
					ExitCode: -1,
				}, err
			}
		}
	}

	return &ExecSyncResponse{
		Stdout:   stdoutBuf.Bytes(),
		Stderr:   stderrBuf.Bytes(),
		ExitCode: 0,
	}, nil
}

// StopContainer stops a container.
func (r *Runtime) StopContainer(c *Container) error {
	if err := utils.ExecCmdWithStdStreams(os.Stdin, os.Stdout, os.Stderr, r.path, "kill", c.name); err != nil {

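For reference, the timeout branch above starts the child, waits on it from a goroutine, and SIGKILLs the process when the timer fires, while both branches recover the exit status from exec.ExitError via syscall.WaitStatus. A minimal standalone sketch of the same pattern using only the standard library (runWithTimeout and the sample command are illustrative, not part of this commit, and Process.Kill stands in for unix.Kill):

package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// runWithTimeout runs a command, killing it after timeout, and returns
// its stdout, stderr and an exit code (-1 on start failure or timeout).
func runWithTimeout(timeout time.Duration, name string, args ...string) ([]byte, []byte, int, error) {
	var stdout, stderr bytes.Buffer
	cmd := exec.Command(name, args...)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		return stdout.Bytes(), stderr.Bytes(), -1, err
	}

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	select {
	case <-time.After(timeout):
		// Best-effort kill, mirroring the SIGKILL-on-timeout branch above.
		_ = cmd.Process.Kill()
		<-done // reap the child so it does not linger as a zombie
		return stdout.Bytes(), stderr.Bytes(), -1, fmt.Errorf("command timed out")
	case err := <-done:
		if err != nil {
			// Recover the child's exit status, as the ExitCode handling above does.
			if exitErr, ok := err.(*exec.ExitError); ok {
				if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
					return stdout.Bytes(), stderr.Bytes(), status.ExitStatus(), err
				}
			}
			return stdout.Bytes(), stderr.Bytes(), -1, err
		}
	}
	return stdout.Bytes(), stderr.Bytes(), 0, nil
}

func main() {
	out, _, code, err := runWithTimeout(2*time.Second, "echo", "HELLO")
	fmt.Printf("out=%q code=%d err=%v\n", out, code, err)
}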
@@ -577,7 +577,35 @@ func (s *Server) UpdateRuntimeConfig(ctx context.Context, req *pb.UpdateRuntimeC

// ExecSync runs a command in a container synchronously.
func (s *Server) ExecSync(ctx context.Context, req *pb.ExecSyncRequest) (*pb.ExecSyncResponse, error) {
	logrus.Debugf("ExecSyncRequest %+v", req)
	c, err := s.getContainerFromRequest(req)
	if err != nil {
		return nil, err
	}

	if err := s.runtime.UpdateStatus(c); err != nil {
		return nil, err
	}

	cState := s.runtime.ContainerStatus(c)
	if !(cState.Status == oci.ContainerStateRunning || cState.Status == oci.ContainerStateCreated) {
		return nil, fmt.Errorf("container is not created or running")
	}

	cmd := req.GetCmd()
	if cmd == nil {
		return nil, fmt.Errorf("exec command cannot be empty")
	}

	execResp, err := s.runtime.ExecSync(c, cmd, req.GetTimeout())
	resp := &pb.ExecSyncResponse{
		Stdout:   execResp.Stdout,
		Stderr:   execResp.Stderr,
		ExitCode: &execResp.ExitCode,
	}

	logrus.Debugf("ExecSyncResponse: %+v", resp)
	return resp, err
}

// Exec prepares a streaming endpoint to execute a command in the container.

@@ -327,3 +327,36 @@ function teardown() {
	cleanup_pods
	stop_ocid
}

@test "ctr execsync" {
	# this test requires docker, thus it can't yet be run in a container
	if [ "$TRAVIS" = "true" ]; then # instead of $TRAVIS, add a function is_containerized to skip here
		skip "cannot yet run this test in a container, use sudo make localintegration"
	fi

	start_ocid
	run ocic pod create --config "$TESTDATA"/sandbox_config.json
	echo "$output"
	[ "$status" -eq 0 ]
	pod_id="$output"
	run ocic ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
	echo "$output"
	[ "$status" -eq 0 ]
	ctr_id="$output"
	run ocic ctr start --id "$ctr_id"
	echo "$output"
	[ "$status" -eq 0 ]
	run ocic ctr execsync --id "$ctr_id" echo HELLO
	echo "$output"
	[ "$status" -eq 0 ]
	[[ "$output" =~ "HELLO" ]]
	run ocic ctr execsync --id "$ctr_id" --timeout 1 sleep 10
	echo "$output"
	[[ "$output" =~ "command timed out" ]]
	run ocic pod remove --id "$pod_id"
	echo "$output"
	[ "$status" -eq 0 ]
	cleanup_ctrs
	cleanup_pods
	stop_ocid
}