Add all pids to state output

Also update libcontainer dep

Signed-off-by: Michael Crosby <crosbymichael@gmail.com>
Michael Crosby 2016-02-11 14:07:34 -08:00
parent d6bd304c92
commit 532697f32f
123 changed files with 11277 additions and 547 deletions


@ -221,15 +221,28 @@ func createAPIContainer(c runtime.Container) (*types.Container, error) {
},
})
}
pids, err := c.Pids()
if err != nil {
return nil, grpc.Errorf(codes.Internal, "get all pids for container")
}
return &types.Container{
Id: c.ID(),
BundlePath: c.Path(),
Processes: procs,
Labels: c.Labels(),
Status: string(c.State()),
Pids: toUint32(pids),
}, nil
}
func toUint32(its []int) []uint32 {
o := []uint32{}
for _, i := range its {
o = append(o, uint32(i))
}
return o
}
func (s *apiServer) UpdateContainer(ctx context.Context, r *types.UpdateContainerRequest) (*types.UpdateContainerResponse, error) {
e := supervisor.NewEvent(supervisor.UpdateContainerEventType)
e.ID = r.Id
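
The hunk above feeds the result of `c.Pids()` through `toUint32` into the new `Pids` field of `types.Container`, so every listing/state response now carries the container's process ids. As a rough illustration of what API consumers see, here is a minimal sketch; it relies only on the struct fields shown in this diff, and the import path of the generated `types` package is an assumption.

```go
package main

import (
	"fmt"

	// Assumed location of the generated protobuf types in this tree.
	"github.com/docker/containerd/api/grpc/types"
)

// printContainer uses only fields visible in the hunks above:
// Id, BundlePath, Status, Labels and the new Pids slice.
func printContainer(c *types.Container) {
	fmt.Printf("id=%s bundle=%s status=%s labels=%v\n",
		c.Id, c.BundlePath, c.Status, c.Labels)
	for _, pid := range c.Pids {
		fmt.Printf("  pid %d\n", pid)
	}
}

func main() {
	// Hypothetical value, shaped like what createAPIContainer returns.
	printContainer(&types.Container{
		Id:         "test",
		BundlePath: "/containers/test",
		Status:     "running",
		Pids:       []uint32{1, 42},
	})
}
```

Since `pids` is a new repeated field (tag 7), older clients that do not know about it simply ignore it.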


@ -310,6 +310,7 @@ type Container struct {
Processes []*Process `protobuf:"bytes,4,rep,name=processes" json:"processes,omitempty"`
Status string `protobuf:"bytes,5,opt,name=status" json:"status,omitempty"`
Labels []string `protobuf:"bytes,6,rep,name=labels" json:"labels,omitempty"`
Pids []uint32 `protobuf:"varint,7,rep,name=pids" json:"pids,omitempty"`
}
func (m *Container) Reset() { *m = Container{} }
@ -1150,102 +1151,102 @@ var _API_serviceDesc = grpc.ServiceDesc{
}
var fileDescriptor0 = []byte{
// 1537 bytes of a gzipped FileDescriptorProto
// 1544 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x58, 0xd9, 0x6e, 0xdc, 0x54,
0x18, 0xce, 0x8c, 0x3d, 0xdb, 0x3f, 0x4b, 0x12, 0x67, 0x9b, 0x4c, 0x29, 0x0d, 0x6e, 0xa1, 0x15,
0xaa, 0xa2, 0x92, 0xb2, 0x94, 0x22, 0x01, 0x25, 0xad, 0x5a, 0x50, 0x0b, 0x51, 0x93, 0x20, 0x71,
0xc3, 0xc8, 0x63, 0x1f, 0x66, 0x0e, 0xf1, 0x86, 0x7d, 0x9c, 0x45, 0xe2, 0x09, 0xe0, 0x92, 0xe7,
0x40, 0xe2, 0x8a, 0x07, 0xe0, 0x09, 0x78, 0x0e, 0x9e, 0x82, 0xff, 0x2c, 0xf6, 0xd8, 0x9e, 0xa5,
0xf4, 0x82, 0x9b, 0x91, 0xce, 0x39, 0xff, 0xf2, 0xfd, 0xdf, 0xbf, 0xf8, 0x9c, 0x81, 0x96, 0x15,
0xd2, 0xfd, 0x30, 0x0a, 0x58, 0x60, 0xd4, 0xd8, 0x55, 0x48, 0x62, 0x73, 0x04, 0x9b, 0xa7, 0xa1,
0x63, 0x31, 0x72, 0x14, 0x05, 0x36, 0x89, 0xe3, 0x97, 0xe4, 0xa7, 0x84, 0xc4, 0xcc, 0x00, 0xa8,
0x52, 0xa7, 0x5f, 0xd9, 0xab, 0xdc, 0x69, 0x19, 0x6d, 0xd0, 0x42, 0x5c, 0x54, 0xc5, 0x02, 0x4f,
0x6c, 0x37, 0x88, 0xc9, 0x31, 0x73, 0xa8, 0xdf, 0xd7, 0x70, 0xaf, 0x69, 0x74, 0xa1, 0x76, 0x41,
0x1d, 0x36, 0xe9, 0xeb, 0xb8, 0xec, 0x1a, 0x3d, 0xa8, 0x4f, 0x08, 0x1d, 0x4f, 0x58, 0xbf, 0xc6,
0xd7, 0xe6, 0x0e, 0x6c, 0x95, 0x7c, 0xc4, 0x61, 0xe0, 0xc7, 0xc4, 0xfc, 0xb5, 0x02, 0xdb, 0x87,
0x11, 0xc1, 0x93, 0xc3, 0xc0, 0x67, 0x16, 0xf5, 0x49, 0x34, 0xcf, 0x3f, 0x2e, 0x46, 0x89, 0xef,
0xb8, 0xe4, 0xc8, 0x42, 0x1f, 0x53, 0x18, 0x13, 0x62, 0x9f, 0x85, 0x01, 0xf5, 0x99, 0x80, 0xd1,
0xe2, 0x30, 0x62, 0x81, 0x4a, 0x17, 0x4b, 0x84, 0x81, 0xcb, 0x20, 0x91, 0x30, 0xd2, 0x35, 0x89,
0xa2, 0x7e, 0x3d, 0x5d, 0xbb, 0xd6, 0x88, 0xb8, 0x71, 0xbf, 0xb1, 0xa7, 0xdd, 0x69, 0x99, 0x9f,
0xc2, 0xce, 0x0c, 0x18, 0x09, 0xd4, 0xb8, 0x09, 0x2d, 0x3b, 0xdd, 0x14, 0xa0, 0xda, 0x07, 0x6b,
0xfb, 0x82, 0xc0, 0xfd, 0x4c, 0xd8, 0x7c, 0x00, 0xdd, 0x63, 0x3a, 0xf6, 0x2d, 0xf7, 0x95, 0x1c,
0x72, 0x24, 0x42, 0x52, 0x00, 0xef, 0x9a, 0x6b, 0xd0, 0x4b, 0x35, 0x15, 0x33, 0x7f, 0x54, 0x60,
0xfd, 0x91, 0xe3, 0x2c, 0x49, 0xca, 0x1a, 0x34, 0x19, 0x89, 0x3c, 0xca, 0xad, 0x54, 0x45, 0x16,
0x76, 0x41, 0x4f, 0x62, 0xc4, 0xa7, 0x09, 0x7c, 0x6d, 0x85, 0xef, 0x14, 0xb7, 0x8c, 0x0e, 0xe8,
0x56, 0x34, 0x8e, 0x91, 0x18, 0x4d, 0x62, 0x21, 0xfe, 0x39, 0xb2, 0xa2, 0x16, 0xf6, 0x85, 0xa3,
0x28, 0x51, 0x28, 0x1b, 0x45, 0x3a, 0x9b, 0x25, 0x3a, 0x5b, 0x25, 0x3a, 0x81, 0xaf, 0x31, 0x7c,
0x5d, 0xf8, 0x42, 0x1b, 0x89, 0x42, 0xd9, 0xe5, 0x8b, 0xb1, 0x0a, 0xbb, 0x6b, 0x6c, 0x43, 0xcf,
0x72, 0x1c, 0xca, 0x68, 0x80, 0xa0, 0x9f, 0x52, 0x27, 0x46, 0xa8, 0x1a, 0x86, 0xbf, 0x09, 0x46,
0x3e, 0x56, 0x45, 0xc1, 0xf3, 0x2c, 0x1d, 0x59, 0x9e, 0xe7, 0xf1, 0xf0, 0x76, 0xa1, 0x10, 0xaa,
0x22, 0xf6, 0xf5, 0x34, 0x37, 0xd9, 0x81, 0x39, 0x80, 0xfe, 0xac, 0x35, 0xe5, 0xe9, 0x3e, 0xec,
0x3c, 0x26, 0x2e, 0x79, 0x95, 0x27, 0x24, 0xd1, 0xb7, 0x3c, 0x22, 0x73, 0xc8, 0x0d, 0xce, 0x2a,
0x29, 0x83, 0x37, 0x61, 0xeb, 0x39, 0x8d, 0xd9, 0x52, 0x73, 0xe6, 0x77, 0x00, 0x53, 0x81, 0xcc,
0x78, 0xe6, 0x8a, 0x5c, 0x52, 0xa6, 0x12, 0x8b, 0x24, 0x32, 0x3b, 0x54, 0xbd, 0xb6, 0x01, 0xed,
0xc4, 0xa7, 0x97, 0xc7, 0x81, 0x7d, 0x46, 0x58, 0x2c, 0x4a, 0x5d, 0x34, 0x60, 0x3c, 0x21, 0xae,
0x2b, 0x2a, 0xbd, 0x69, 0x7e, 0x0e, 0xdb, 0x65, 0xff, 0xaa, 0x90, 0xdf, 0x81, 0xf6, 0x94, 0xad,
0x18, 0xbd, 0x69, 0x8b, 0xe8, 0xea, 0x1c, 0x33, 0x64, 0x6b, 0x1e, 0xf0, 0x3d, 0xe8, 0x65, 0x45,
0x2f, 0x84, 0x64, 0x29, 0x58, 0x2c, 0x89, 0x95, 0xc4, 0xef, 0x15, 0x68, 0xa8, 0x74, 0xa6, 0x25,
0xf5, 0x3f, 0x16, 0xed, 0x3a, 0xb4, 0xe2, 0xab, 0x98, 0x11, 0xef, 0x48, 0x95, 0x6e, 0xf7, 0x75,
0x4b, 0xf7, 0x67, 0x68, 0x65, 0x11, 0x2d, 0x4e, 0x79, 0x69, 0x0e, 0xc9, 0x99, 0xf3, 0x16, 0xb4,
0x42, 0x19, 0x29, 0x91, 0x48, 0xdb, 0x07, 0x3d, 0x15, 0x44, 0xca, 0xc0, 0x94, 0x9d, 0x5a, 0x69,
0xee, 0xd4, 0xc5, 0xdc, 0xb9, 0x0d, 0x8d, 0x17, 0x96, 0x3d, 0x41, 0xe7, 0xdc, 0x9f, 0x1d, 0x2a,
0x1a, 0xc5, 0x1c, 0xf5, 0x88, 0x17, 0x44, 0x57, 0xc2, 0xbf, 0x6e, 0x7e, 0x8b, 0x03, 0x46, 0x26,
0x45, 0x65, 0xf3, 0x16, 0xd6, 0x7e, 0x8a, 0x3b, 0x4d, 0xe6, 0xcc, 0x5c, 0x32, 0x6e, 0x40, 0xc3,
0x93, 0xf6, 0x55, 0x7b, 0xa4, 0x00, 0x95, 0x57, 0xf3, 0x11, 0x6c, 0xcb, 0xf9, 0xbc, 0x74, 0x0a,
0xcf, 0x4c, 0x30, 0x19, 0x93, 0xa0, 0xc1, 0xdc, 0x85, 0x9d, 0x19, 0x13, 0xaa, 0x19, 0x56, 0xa1,
0xfb, 0xe4, 0x9c, 0x60, 0xb5, 0x29, 0xa3, 0xe6, 0xdf, 0x15, 0xa8, 0x89, 0x1d, 0x1e, 0x2e, 0x47,
0xa2, 0x1c, 0x48, 0x67, 0xf3, 0xec, 0x77, 0x4b, 0xd4, 0xeb, 0x79, 0x40, 0xb5, 0xd2, 0x48, 0x95,
0x15, 0x80, 0x41, 0xab, 0xbc, 0x88, 0x1a, 0x98, 0xcd, 0x4a, 0x91, 0xbb, 0xd6, 0x02, 0xee, 0x8a,
0xd3, 0x05, 0x16, 0x4d, 0x97, 0x3f, 0x2b, 0xd0, 0xf9, 0x9a, 0xb0, 0x8b, 0x20, 0x3a, 0xe3, 0x19,
0x8a, 0x4b, 0xed, 0x8c, 0x65, 0x1f, 0x5d, 0x0e, 0x47, 0x57, 0x0c, 0x6b, 0x44, 0xa4, 0x92, 0xc7,
0x83, 0x3b, 0x47, 0x96, 0x6c, 0x62, 0x4d, 0xec, 0x61, 0x1d, 0xbf, 0xbc, 0x1c, 0x62, 0x55, 0x06,
0x91, 0xec, 0x6b, 0x21, 0x86, 0x5b, 0x4e, 0x14, 0x84, 0x21, 0x91, 0x91, 0xea, 0xdc, 0xd8, 0x49,
0x6a, 0xac, 0x9e, 0x4a, 0xe1, 0x4e, 0xa8, 0x8c, 0x35, 0x52, 0x63, 0x27, 0x99, 0xb1, 0x66, 0x4e,
0x2c, 0x35, 0xd6, 0x12, 0x25, 0xe5, 0x41, 0xf3, 0x30, 0x4c, 0x4e, 0x63, 0x6b, 0x4c, 0xf8, 0x64,
0x61, 0x01, 0xb3, 0xdc, 0x61, 0xc2, 0x97, 0x02, 0xba, 0x6e, 0x6c, 0x42, 0x27, 0x24, 0x11, 0x16,
0xa5, 0xda, 0xad, 0x22, 0x51, 0xba, 0x71, 0x0d, 0x36, 0xc4, 0x72, 0x48, 0xfd, 0xe1, 0x19, 0x89,
0x7c, 0xe2, 0x7a, 0x81, 0x43, 0x54, 0x1c, 0xbb, 0xb0, 0x9e, 0x1d, 0xf2, 0xde, 0x16, 0x47, 0x22,
0x1e, 0xf3, 0x04, 0x7a, 0x27, 0x13, 0xbc, 0x7e, 0x30, 0x97, 0xfa, 0xe3, 0xc7, 0x16, 0xb3, 0x8c,
0x55, 0xcc, 0x13, 0x89, 0x68, 0xe0, 0xc4, 0xca, 0x21, 0x6a, 0x33, 0x29, 0x42, 0x9c, 0x61, 0x7a,
0x24, 0x49, 0xc3, 0xef, 0xc7, 0xf4, 0x88, 0x51, 0x4f, 0x39, 0x34, 0xbf, 0x17, 0x41, 0x48, 0xe2,
0x4d, 0xfc, 0x52, 0x67, 0x60, 0xe5, 0x97, 0x7a, 0x35, 0xcd, 0x57, 0x1a, 0xe8, 0x3e, 0xac, 0xb2,
0x0c, 0xc5, 0x10, 0xab, 0xd6, 0x52, 0x8d, 0xb1, 0xa5, 0x24, 0x8b, 0x18, 0xcd, 0xcf, 0x00, 0x5e,
0x88, 0x3e, 0x14, 0x88, 0x71, 0xb6, 0xe4, 0x09, 0x42, 0xa2, 0x3d, 0xeb, 0x32, 0x63, 0x87, 0x6f,
0x61, 0x4c, 0x3f, 0x58, 0xd4, 0xb5, 0xd5, 0xc5, 0x44, 0x37, 0xff, 0xa9, 0x40, 0x5b, 0x5a, 0x90,
0x20, 0xd1, 0x84, 0x8d, 0xbd, 0x97, 0x9a, 0xd8, 0x4b, 0x2d, 0x16, 0xbf, 0x5e, 0x39, 0x9f, 0x58,
0x86, 0xf1, 0x85, 0x15, 0x2a, 0x2f, 0xda, 0x22, 0xb1, 0xdb, 0xd0, 0x91, 0xd9, 0x50, 0x82, 0xfa,
0x22, 0xc1, 0xbb, 0x7c, 0x3e, 0x22, 0x12, 0x31, 0x4e, 0xdb, 0x07, 0xd7, 0x0b, 0x12, 0x02, 0xe3,
0xbe, 0xf8, 0x7d, 0xe2, 0xb3, 0xe8, 0x6a, 0x70, 0x17, 0x60, 0xba, 0xe2, 0x6d, 0x77, 0x46, 0xae,
0x54, 0x65, 0x63, 0x24, 0xe7, 0x96, 0x9b, 0xa8, 0xc8, 0x1f, 0x56, 0x1f, 0x54, 0xcc, 0xaf, 0x60,
0xf5, 0x0b, 0xf7, 0x8c, 0x06, 0x39, 0x15, 0x94, 0xf2, 0xac, 0x1f, 0x83, 0x48, 0xc5, 0xcb, 0x97,
0xd4, 0xc7, 0xa5, 0xa4, 0x0b, 0xfb, 0x3e, 0x08, 0xa7, 0x57, 0x38, 0x69, 0x4f, 0xd6, 0xcb, 0x5f,
0x1a, 0xc0, 0xd4, 0x98, 0xf1, 0x10, 0x06, 0x34, 0x18, 0x62, 0x49, 0x9d, 0x53, 0x9b, 0xc8, 0x16,
0x18, 0x46, 0xc4, 0x4e, 0xa2, 0x98, 0x9e, 0x13, 0x35, 0xff, 0xb6, 0x55, 0x2c, 0x65, 0x0c, 0x1f,
0xc0, 0xd6, 0x54, 0xd7, 0xc9, 0xa9, 0x55, 0x97, 0xaa, 0xdd, 0x87, 0x0d, 0x54, 0xc3, 0xc1, 0x95,
0x14, 0x94, 0xb4, 0xa5, 0x4a, 0x1f, 0xc3, 0x6e, 0x0e, 0x27, 0xaf, 0xd4, 0x9c, 0xaa, 0xbe, 0x54,
0xf5, 0x43, 0xd8, 0x46, 0xd5, 0x0b, 0x8b, 0xb2, 0xb2, 0x5e, 0xed, 0x3f, 0xe0, 0xf4, 0x48, 0x34,
0x2e, 0xe0, 0xac, 0x2f, 0x55, 0x7a, 0x0f, 0xd6, 0x51, 0xa9, 0xe4, 0xa7, 0xf1, 0x2a, 0x95, 0x98,
0xd8, 0x0c, 0xa7, 0x4a, 0x4e, 0xa5, 0xb9, 0x4c, 0x05, 0x3f, 0x2f, 0x9d, 0x67, 0xc9, 0x98, 0x30,
0x77, 0x94, 0x55, 0xff, 0xeb, 0x36, 0xd0, 0x2f, 0x55, 0x68, 0x1f, 0x8e, 0xa3, 0x20, 0x09, 0x0b,
0x5d, 0x2e, 0x6b, 0x78, 0xa6, 0xcb, 0xa5, 0xcc, 0x1d, 0xe8, 0xc8, 0xaf, 0xa7, 0x12, 0x93, 0xcd,
0x65, 0xcc, 0x96, 0x3a, 0xbf, 0x14, 0x8d, 0x38, 0x66, 0x25, 0x58, 0x6c, 0xaf, 0x5c, 0xf9, 0x7d,
0x02, 0xdd, 0x89, 0x0c, 0x44, 0x49, 0xca, 0x54, 0xde, 0x4a, 0x3d, 0x4f, 0x01, 0xee, 0xe7, 0x03,
0x96, 0x4d, 0xf4, 0x0c, 0xd6, 0x67, 0x36, 0x8b, 0xbd, 0x64, 0xe6, 0x7b, 0xa9, 0x7d, 0xb0, 0xa1,
0xcc, 0xe6, 0xb5, 0x44, 0x83, 0x85, 0x50, 0x93, 0x78, 0xde, 0x85, 0xae, 0x2f, 0x3f, 0x3a, 0x19,
0x13, 0x5a, 0x4e, 0xb1, 0xf0, 0x41, 0x42, 0x36, 0x6c, 0x81, 0x6f, 0x2e, 0x1b, 0x79, 0x6e, 0x31,
0x1f, 0xbc, 0x22, 0x50, 0xcc, 0x0b, 0x15, 0xfd, 0xea, 0x36, 0x38, 0xef, 0x1d, 0x72, 0xf0, 0x5b,
0x1d, 0xb4, 0x47, 0x47, 0x5f, 0x1a, 0x2f, 0x61, 0xb5, 0xf4, 0x7a, 0x32, 0xd2, 0xb1, 0x32, 0xff,
0x89, 0x37, 0x78, 0x73, 0xd1, 0xb1, 0xba, 0x38, 0xac, 0x70, 0x9b, 0xa5, 0x5b, 0x45, 0x66, 0x73,
0xfe, 0x85, 0x25, 0xb3, 0xb9, 0xe8, 0x32, 0xb2, 0x62, 0x7c, 0x04, 0x75, 0xf9, 0xd6, 0x32, 0x36,
0x95, 0x6c, 0xe1, 0xd1, 0x36, 0xd8, 0x2a, 0xed, 0x66, 0x8a, 0xcf, 0xa1, 0x5b, 0x78, 0xc5, 0x1a,
0xd7, 0x0a, 0xbe, 0x8a, 0x4f, 0xb5, 0xc1, 0x1b, 0xf3, 0x0f, 0x33, 0x6b, 0x87, 0x00, 0xd3, 0x37,
0x8f, 0xd1, 0x57, 0xd2, 0x33, 0x4f, 0xbe, 0xc1, 0xee, 0x9c, 0x93, 0xcc, 0xc8, 0x29, 0xac, 0x95,
0x1f, 0x35, 0x46, 0x89, 0xd5, 0xf2, 0x13, 0x64, 0x70, 0x63, 0xe1, 0x79, 0xde, 0x6c, 0xf9, 0x69,
0x93, 0x99, 0x5d, 0xf0, 0x50, 0xca, 0xcc, 0x2e, 0x7c, 0x13, 0xad, 0x18, 0xdf, 0x40, 0xaf, 0xf8,
0x2a, 0x31, 0x52, 0x92, 0xe6, 0x3e, 0x96, 0x06, 0xd7, 0x17, 0x9c, 0x66, 0x06, 0xdf, 0x97, 0x8d,
0x80, 0x37, 0x97, 0x34, 0x67, 0xb9, 0x27, 0xcb, 0x60, 0xb3, 0xb8, 0x99, 0x69, 0xdd, 0x83, 0xba,
0xbc, 0x8f, 0x66, 0x05, 0x50, 0xb8, 0x9e, 0x0e, 0x3a, 0xf9, 0x5d, 0x73, 0xe5, 0x5e, 0x05, 0x67,
0x5e, 0xf3, 0x29, 0x61, 0xb2, 0x3b, 0xf2, 0xae, 0x66, 0x54, 0xc4, 0x26, 0x57, 0x19, 0xd5, 0xc5,
0x9f, 0x2c, 0xf7, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xa5, 0xa9, 0x7b, 0x24, 0x71, 0x11, 0x00,
0x00,
0xc3, 0xc8, 0x63, 0x1f, 0x66, 0x0e, 0xf1, 0x86, 0x7d, 0x9c, 0xe5, 0x15, 0xca, 0x25, 0xcf, 0x81,
0xc4, 0x15, 0x0f, 0xc0, 0x13, 0xf0, 0x1c, 0x3c, 0x05, 0xff, 0x59, 0xec, 0xb1, 0x3d, 0x4b, 0xe9,
0x05, 0x37, 0x23, 0x9d, 0x73, 0xfe, 0xe5, 0xfb, 0xbf, 0x7f, 0xf1, 0x39, 0x03, 0x2d, 0x2b, 0xa4,
0xfb, 0x61, 0x14, 0xb0, 0xc0, 0xa8, 0xb1, 0xab, 0x90, 0xc4, 0xe6, 0x08, 0x36, 0x4f, 0x43, 0xc7,
0x62, 0xe4, 0x28, 0x0a, 0x6c, 0x12, 0xc7, 0x2f, 0xc9, 0x2f, 0x09, 0x89, 0x99, 0x01, 0x50, 0xa5,
0x4e, 0xbf, 0xb2, 0x57, 0xb9, 0xd3, 0x32, 0xda, 0xa0, 0x85, 0xb8, 0xa8, 0x8a, 0x05, 0x9e, 0xd8,
0x6e, 0x10, 0x93, 0x63, 0xe6, 0x50, 0xbf, 0xaf, 0xe1, 0x5e, 0xd3, 0xe8, 0x42, 0xed, 0x82, 0x3a,
0x6c, 0xd2, 0xd7, 0x71, 0xd9, 0x35, 0x7a, 0x50, 0x9f, 0x10, 0x3a, 0x9e, 0xb0, 0x7e, 0x8d, 0xaf,
0xcd, 0x1d, 0xd8, 0x2a, 0xf9, 0x88, 0xc3, 0xc0, 0x8f, 0x89, 0xf9, 0x6b, 0x05, 0xb6, 0x0f, 0x23,
0x82, 0x27, 0x87, 0x81, 0xcf, 0x2c, 0xea, 0x93, 0x68, 0x9e, 0x7f, 0x5c, 0x8c, 0x12, 0xdf, 0x71,
0xc9, 0x91, 0x85, 0x3e, 0xa6, 0x30, 0x26, 0xc4, 0x3e, 0x0b, 0x03, 0xea, 0x33, 0x01, 0xa3, 0xc5,
0x61, 0xc4, 0x02, 0x95, 0x2e, 0x96, 0x08, 0x03, 0x97, 0x41, 0x22, 0x61, 0xa4, 0x6b, 0x12, 0x45,
0xfd, 0x7a, 0xba, 0x76, 0xad, 0x11, 0x71, 0xe3, 0x7e, 0x63, 0x4f, 0xbb, 0xd3, 0x32, 0x3f, 0x87,
0x9d, 0x19, 0x30, 0x12, 0xa8, 0x71, 0x13, 0x5a, 0x76, 0xba, 0x29, 0x40, 0xb5, 0x0f, 0xd6, 0xf6,
0x05, 0x81, 0xfb, 0x99, 0xb0, 0xf9, 0x00, 0xba, 0xc7, 0x74, 0xec, 0x5b, 0xee, 0x6b, 0x39, 0xe4,
0x48, 0x84, 0xa4, 0x00, 0xde, 0x35, 0xd7, 0xa0, 0x97, 0x6a, 0x2a, 0x66, 0xfe, 0xa8, 0xc0, 0xfa,
0x23, 0xc7, 0x59, 0x92, 0x94, 0x35, 0x68, 0x32, 0x12, 0x79, 0x94, 0x5b, 0xa9, 0x8a, 0x2c, 0xec,
0x82, 0x9e, 0xc4, 0x88, 0x4f, 0x13, 0xf8, 0xda, 0x0a, 0xdf, 0x29, 0x6e, 0x19, 0x1d, 0xd0, 0xad,
0x68, 0x1c, 0x23, 0x31, 0x9a, 0xc4, 0x42, 0xfc, 0x73, 0x64, 0x45, 0x2d, 0xec, 0x0b, 0x47, 0x51,
0xa2, 0x50, 0x36, 0x8a, 0x74, 0x36, 0x4b, 0x74, 0xb6, 0x4a, 0x74, 0x02, 0x5f, 0x63, 0xf8, 0xba,
0xf0, 0x85, 0x36, 0x12, 0x85, 0xb2, 0xcb, 0x17, 0x63, 0x15, 0x76, 0xd7, 0xd8, 0x86, 0x9e, 0xe5,
0x38, 0x94, 0xd1, 0x00, 0x41, 0x3f, 0xa5, 0x4e, 0x8c, 0x50, 0x35, 0x0c, 0x7f, 0x13, 0x8c, 0x7c,
0xac, 0x8a, 0x82, 0xe7, 0x59, 0x3a, 0xb2, 0x3c, 0xcf, 0xe3, 0xe1, 0xdd, 0x42, 0x21, 0x54, 0x45,
0xec, 0xeb, 0x69, 0x6e, 0xb2, 0x03, 0x73, 0x00, 0xfd, 0x59, 0x6b, 0xca, 0xd3, 0x7d, 0xd8, 0x79,
0x4c, 0x5c, 0xf2, 0x3a, 0x4f, 0x48, 0xa2, 0x6f, 0x79, 0x44, 0xe6, 0x90, 0x1b, 0x9c, 0x55, 0x52,
0x06, 0x6f, 0xc2, 0xd6, 0x73, 0x1a, 0xb3, 0xa5, 0xe6, 0xcc, 0x1f, 0x00, 0xa6, 0x02, 0x99, 0xf1,
0xcc, 0x15, 0xb9, 0xa4, 0x4c, 0x25, 0x16, 0x49, 0x64, 0x76, 0xa8, 0x7a, 0x6d, 0x03, 0xda, 0x89,
0x4f, 0x2f, 0x8f, 0x03, 0xfb, 0x8c, 0xb0, 0x58, 0x94, 0xba, 0x68, 0xc0, 0x78, 0x42, 0x5c, 0x57,
0x54, 0x7a, 0xd3, 0xfc, 0x12, 0xb6, 0xcb, 0xfe, 0x55, 0x21, 0xbf, 0x07, 0xed, 0x29, 0x5b, 0x31,
0x7a, 0xd3, 0x16, 0xd1, 0xd5, 0x39, 0x66, 0xc8, 0xd6, 0x3c, 0xe0, 0x7b, 0xd0, 0xcb, 0x8a, 0x5e,
0x08, 0xc9, 0x52, 0xb0, 0x58, 0x12, 0x2b, 0x89, 0xdf, 0x2b, 0xd0, 0x50, 0xe9, 0x4c, 0x4b, 0xea,
0x7f, 0x2c, 0xda, 0x75, 0x68, 0xc5, 0x57, 0x31, 0x23, 0xde, 0x91, 0x2a, 0xdd, 0xee, 0x9b, 0x96,
0xee, 0xab, 0x0a, 0xb4, 0xb2, 0x90, 0x16, 0xe7, 0xbc, 0x34, 0x88, 0xe4, 0xd0, 0x79, 0x07, 0x5a,
0xa1, 0x0c, 0x95, 0x48, 0xa8, 0xed, 0x83, 0x9e, 0x8a, 0x22, 0xa5, 0x60, 0x4a, 0x4f, 0xad, 0x34,
0x78, 0xea, 0x22, 0x1a, 0x74, 0x12, 0xf2, 0x6e, 0x68, 0x88, 0x6e, 0xb8, 0x0d, 0x8d, 0x17, 0x96,
0x3d, 0x41, 0x28, 0xfc, 0xc0, 0x0e, 0x15, 0xab, 0x62, 0xac, 0x7a, 0xc4, 0x0b, 0xa2, 0x2b, 0x81,
0x46, 0x37, 0xbf, 0xc7, 0x79, 0x23, 0x73, 0xa4, 0x92, 0x7b, 0x0b, 0x5b, 0x21, 0x8d, 0x22, 0xcd,
0xed, 0xcc, 0x98, 0x32, 0x6e, 0x40, 0xc3, 0x93, 0xf6, 0x55, 0xb7, 0xa4, 0x70, 0x95, 0x57, 0xf3,
0x11, 0x6c, 0xcb, 0x71, 0xbd, 0x74, 0x28, 0xcf, 0x0c, 0x34, 0x19, 0xa1, 0x20, 0xc5, 0xdc, 0x85,
0x9d, 0x19, 0x13, 0xaa, 0x37, 0x56, 0xa1, 0xfb, 0xe4, 0x9c, 0x60, 0xf1, 0x29, 0xa3, 0xe6, 0xdf,
0x15, 0xa8, 0x89, 0x1d, 0x1e, 0x2e, 0x47, 0xa2, 0x1c, 0x48, 0x67, 0xf3, 0xec, 0x77, 0x4b, 0x89,
0xd0, 0xf3, 0x80, 0x6a, 0xa5, 0x09, 0x2b, 0x0b, 0x02, 0x83, 0x56, 0x59, 0x12, 0x25, 0x31, 0x9b,
0xa3, 0x22, 0x77, 0xad, 0x05, 0xdc, 0x15, 0x87, 0x0d, 0x2c, 0x1a, 0x36, 0x7f, 0x56, 0xa0, 0xf3,
0x2d, 0x61, 0x17, 0x41, 0x74, 0xc6, 0x33, 0x14, 0x97, 0xba, 0x1b, 0xbb, 0x20, 0xba, 0x1c, 0x8e,
0xae, 0x18, 0x56, 0x8c, 0x48, 0x25, 0x8f, 0x07, 0x77, 0x8e, 0x2c, 0xd9, 0xd3, 0x9a, 0xd8, 0xc3,
0xb2, 0x7e, 0x79, 0x39, 0xc4, 0x22, 0x0d, 0x22, 0xd9, 0xe6, 0x42, 0x0c, 0xb7, 0x9c, 0x28, 0x08,
0x43, 0x22, 0x23, 0xd5, 0xb9, 0xb1, 0x93, 0xd4, 0x58, 0x3d, 0x95, 0xc2, 0x9d, 0x50, 0x19, 0x6b,
0xa4, 0xc6, 0x4e, 0x32, 0x63, 0xcd, 0x9c, 0x58, 0x6a, 0xac, 0x25, 0x4a, 0xca, 0x83, 0xe6, 0x61,
0x98, 0x9c, 0xc6, 0xd6, 0x98, 0xf0, 0x41, 0xc3, 0x02, 0x66, 0xb9, 0xc3, 0x84, 0x2f, 0x05, 0x74,
0xdd, 0xd8, 0x84, 0x4e, 0x48, 0x22, 0x2c, 0x4a, 0xb5, 0x5b, 0x45, 0xa2, 0x74, 0xe3, 0x1a, 0x6c,
0x88, 0xe5, 0x90, 0xfa, 0xc3, 0x33, 0x12, 0xf9, 0xc4, 0xf5, 0x02, 0x87, 0xa8, 0x38, 0x76, 0x61,
0x3d, 0x3b, 0xe4, 0xad, 0x2e, 0x8e, 0x44, 0x3c, 0xe6, 0x09, 0xf4, 0x4e, 0x26, 0x78, 0x1b, 0x61,
0x2e, 0xf5, 0xc7, 0x8f, 0x2d, 0x66, 0x19, 0xab, 0x98, 0x27, 0x12, 0xd1, 0xc0, 0x89, 0x95, 0x43,
0xd4, 0x66, 0x52, 0x84, 0x38, 0xc3, 0xf4, 0x48, 0x92, 0x86, 0x9f, 0x93, 0xe9, 0x11, 0xa3, 0x9e,
0x72, 0x68, 0xfe, 0x28, 0x82, 0x90, 0xc4, 0x9b, 0xf8, 0xe1, 0xce, 0xc0, 0xca, 0x0f, 0xf7, 0x6a,
0x9a, 0xaf, 0x34, 0xd0, 0x7d, 0x58, 0x65, 0x19, 0x8a, 0x21, 0x56, 0xad, 0xa5, 0x1a, 0x63, 0x4b,
0x49, 0x16, 0x31, 0x9a, 0x5f, 0x00, 0xbc, 0x10, 0x7d, 0x28, 0x10, 0xe3, 0xa8, 0xc9, 0x13, 0x84,
0x44, 0x7b, 0xd6, 0x65, 0xc6, 0x0e, 0xdf, 0xc2, 0x98, 0x7e, 0xb2, 0xa8, 0x6b, 0xab, 0x7b, 0x8a,
0x6e, 0xfe, 0x53, 0x81, 0xb6, 0xb4, 0x20, 0x41, 0xa2, 0x09, 0x1b, 0x7b, 0x2f, 0x35, 0xb1, 0x97,
0x5a, 0x2c, 0x7e, 0xcc, 0x72, 0x3e, 0xb1, 0x0c, 0xe3, 0x0b, 0x2b, 0x54, 0x5e, 0xb4, 0x45, 0x62,
0xb7, 0xa1, 0x23, 0xb3, 0xa1, 0x04, 0xf5, 0x45, 0x82, 0x77, 0xf9, 0xb8, 0x44, 0x24, 0x62, 0xba,
0xb6, 0x0f, 0xae, 0x17, 0x24, 0x04, 0xc6, 0x7d, 0xf1, 0xfb, 0xc4, 0x67, 0xd1, 0xd5, 0xe0, 0x2e,
0xc0, 0x74, 0xc5, 0xdb, 0xee, 0x8c, 0x5c, 0xa9, 0xca, 0xc6, 0x48, 0xce, 0x2d, 0x37, 0x51, 0x91,
0x3f, 0xac, 0x3e, 0xa8, 0x98, 0xdf, 0xc0, 0xea, 0x57, 0xee, 0x19, 0x0d, 0x72, 0x2a, 0x28, 0xe5,
0x59, 0x3f, 0x07, 0x91, 0x8a, 0x97, 0x2f, 0xa9, 0x8f, 0x4b, 0x49, 0x17, 0xf6, 0x7d, 0x10, 0x4e,
0x6f, 0x74, 0xd2, 0x9e, 0xac, 0x97, 0xbf, 0x34, 0x80, 0xa9, 0x31, 0xe3, 0x21, 0x0c, 0x68, 0x30,
0xc4, 0x92, 0x3a, 0xa7, 0x36, 0x91, 0x2d, 0x30, 0x8c, 0x88, 0x9d, 0x44, 0x31, 0x3d, 0x27, 0x6a,
0xfe, 0x6d, 0xab, 0x58, 0xca, 0x18, 0x3e, 0x82, 0xad, 0xa9, 0xae, 0x93, 0x53, 0xab, 0x2e, 0x55,
0xbb, 0x0f, 0x1b, 0xa8, 0x86, 0x83, 0x2b, 0x29, 0x28, 0x69, 0x4b, 0x95, 0x3e, 0x85, 0xdd, 0x1c,
0x4e, 0x5e, 0xa9, 0x39, 0x55, 0x7d, 0xa9, 0xea, 0xc7, 0xb0, 0x8d, 0xaa, 0x17, 0x16, 0x65, 0x65,
0xbd, 0xda, 0x7f, 0xc0, 0xe9, 0x91, 0x68, 0x5c, 0xc0, 0x59, 0x5f, 0xaa, 0xf4, 0x01, 0xac, 0xa3,
0x52, 0xc9, 0x4f, 0xe3, 0x75, 0x2a, 0x31, 0xb1, 0x19, 0x4e, 0x95, 0x9c, 0x4a, 0x73, 0x99, 0x0a,
0x7e, 0x5e, 0x3a, 0xcf, 0x92, 0x31, 0x61, 0xee, 0x28, 0xab, 0xfe, 0x37, 0x6d, 0xa0, 0x57, 0x55,
0x68, 0x1f, 0x8e, 0xa3, 0x20, 0x09, 0x0b, 0x5d, 0x2e, 0x6b, 0x78, 0xa6, 0xcb, 0xa5, 0xcc, 0x1d,
0xe8, 0xc8, 0xaf, 0xa7, 0x12, 0x93, 0xcd, 0x65, 0xcc, 0x96, 0x3a, 0xbf, 0x23, 0x8d, 0x38, 0x66,
0x25, 0x58, 0x6c, 0xaf, 0x5c, 0xf9, 0x7d, 0x06, 0xdd, 0x89, 0x0c, 0x44, 0x49, 0xca, 0x54, 0xde,
0x4a, 0x3d, 0x4f, 0x01, 0xee, 0xe7, 0x03, 0x96, 0x4d, 0xf4, 0x0c, 0xd6, 0x67, 0x36, 0x8b, 0xbd,
0x64, 0xe6, 0x7b, 0xa9, 0x7d, 0xb0, 0xa1, 0xcc, 0xe6, 0xb5, 0x44, 0x83, 0x85, 0x50, 0x93, 0x78,
0xde, 0x87, 0xae, 0x2f, 0x3f, 0x3a, 0x19, 0x13, 0x5a, 0x4e, 0xb1, 0xf0, 0x41, 0x42, 0x36, 0x6c,
0x81, 0x6f, 0x2e, 0x1b, 0x79, 0x6e, 0x31, 0x1f, 0xbc, 0x22, 0x50, 0xcc, 0x0b, 0x15, 0xfd, 0xea,
0x72, 0x38, 0xef, 0x59, 0x72, 0xf0, 0x5b, 0x1d, 0xb4, 0x47, 0x47, 0x5f, 0x1b, 0x2f, 0x61, 0xb5,
0xf4, 0x98, 0x32, 0xd2, 0xb1, 0x32, 0xff, 0xc5, 0x37, 0x78, 0x7b, 0xd1, 0xb1, 0xba, 0x38, 0xac,
0x70, 0x9b, 0xa5, 0x5b, 0x45, 0x66, 0x73, 0xfe, 0x85, 0x25, 0xb3, 0xb9, 0xe8, 0x32, 0xb2, 0x62,
0x7c, 0x02, 0x75, 0xf9, 0xf4, 0x32, 0x36, 0x95, 0x6c, 0xe1, 0x0d, 0x37, 0xd8, 0x2a, 0xed, 0x66,
0x8a, 0xcf, 0xa1, 0x5b, 0x78, 0xd4, 0x1a, 0xd7, 0x0a, 0xbe, 0x8a, 0x2f, 0xb7, 0xc1, 0x5b, 0xf3,
0x0f, 0x33, 0x6b, 0x87, 0x00, 0xd3, 0x27, 0x90, 0xd1, 0x57, 0xd2, 0x33, 0x2f, 0xc0, 0xc1, 0xee,
0x9c, 0x93, 0xcc, 0xc8, 0x29, 0xac, 0x95, 0xdf, 0x38, 0x46, 0x89, 0xd5, 0xf2, 0x8b, 0x64, 0x70,
0x63, 0xe1, 0x79, 0xde, 0x6c, 0xf9, 0xa5, 0x93, 0x99, 0x5d, 0xf0, 0x6e, 0xca, 0xcc, 0x2e, 0x7c,
0x22, 0xad, 0x18, 0xdf, 0x41, 0xaf, 0xf8, 0x48, 0x31, 0x52, 0x92, 0xe6, 0xbe, 0x9d, 0x06, 0xd7,
0x17, 0x9c, 0x66, 0x06, 0x3f, 0x94, 0x8d, 0x80, 0x37, 0x97, 0x34, 0x67, 0xb9, 0x17, 0xcc, 0x60,
0xb3, 0xb8, 0x99, 0x69, 0xdd, 0x83, 0xba, 0xbc, 0x8f, 0x66, 0x05, 0x50, 0xb8, 0x9e, 0x0e, 0x3a,
0xf9, 0x5d, 0x73, 0xe5, 0x5e, 0x05, 0x67, 0x5e, 0xf3, 0x29, 0x61, 0xb2, 0x3b, 0xf2, 0xae, 0x66,
0x54, 0xc4, 0x26, 0x57, 0x19, 0xd5, 0xc5, 0x7f, 0x2e, 0xf7, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff,
0x66, 0x65, 0xcd, 0xa0, 0x80, 0x11, 0x00, 0x00,
}


@ -132,6 +132,7 @@ message Container {
repeated Process processes = 4; // List of processes which run in container
string status = 5; // Container status ("running", "paused", etc.)
repeated string labels = 6;
repeated uint32 pids = 7;
}
// Machine is information about machine on which containerd is run


@ -10,6 +10,7 @@ import (
"time"
"github.com/Sirupsen/logrus"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/specs"
)
@ -42,6 +43,8 @@ type Container interface {
DeleteCheckpoint(name string) error
// Labels are user provided labels for the container
Labels() []string
// Pids returns all pids inside the container
Pids() ([]int, error)
// Stats returns realtime container stats and resource information
// Stats() (*Stat, error)
// OOM signals the channel if the container received an OOM notification
// OOM() (<-chan struct{}, error)
@ -353,6 +356,18 @@ func (c *container) DeleteCheckpoint(name string) error {
return os.RemoveAll(filepath.Join(c.bundle, "checkpoints", name))
}
func (c *container) Pids() ([]int, error) {
f, err := libcontainer.New(specs.LinuxStateDirectory, libcontainer.Cgroupfs)
if err != nil {
return nil, err
}
container, err := f.Load(c.id)
if err != nil {
return nil, err
}
return container.Processes()
}
func getRootIDs(s *specs.LinuxSpec) (int, int, error) {
if s == nil {
return 0, 0, nil
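
For reference, the same pattern as the new `Pids()` method above, pulled out into a standalone sketch: load the container from the shared libcontainer state directory and ask it for its processes. It assumes the same vendored packages this file already imports.

```go
package main

import (
	"fmt"
	"os"

	"github.com/opencontainers/runc/libcontainer"
	"github.com/opencontainers/specs"
)

// listPids mirrors container.Pids() above: create a cgroupfs-backed factory
// rooted at the spec's state directory, load the container by id, and return
// the pids of every process inside it.
func listPids(id string) ([]int, error) {
	factory, err := libcontainer.New(specs.LinuxStateDirectory, libcontainer.Cgroupfs)
	if err != nil {
		return nil, err
	}
	container, err := factory.Load(id)
	if err != nil {
		return nil, err
	}
	return container.Processes()
}

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: listpids <container-id>")
		os.Exit(1)
	}
	pids, err := listPids(os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(pids)
}
```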


@ -0,0 +1,2 @@
vendor/pkg
runc


@ -0,0 +1,117 @@
## Contribution Guidelines
### Pull requests are always welcome
We are always thrilled to receive pull requests, and do our best to
process them as fast as possible. Not sure if that typo is worth a pull
request? Do it! We will appreciate it.
If your pull request is not accepted on the first try, don't be
discouraged! If there's a problem with the implementation, hopefully you
received feedback on what to improve.
We're trying very hard to keep runc lean and focused. We don't want it
to do everything for everybody. This means that we might decide against
incorporating a new feature. However, there might be a way to implement
that feature *on top of* runc.
### Conventions
Fork the repo and make changes on your fork in a feature branch:
- If it's a bugfix branch, name it XXX-something where XXX is the number of the
issue
- If it's a feature branch, create an enhancement issue to announce your
intentions, and name it XXX-something where XXX is the number of the issue.
Submit unit tests for your changes. Go has a great test framework built in; use
it! Take a look at existing tests for inspiration. Run the full test suite on
your branch before submitting a pull request.
Update the documentation when creating or modifying features. Test
your documentation changes for clarity, concision, and correctness, as
well as a clean documentation build. See ``docs/README.md`` for more
information on building the docs and how docs get released.
Write clean code. Universally formatted code promotes ease of writing, reading,
and maintenance. Always run `gofmt -s -w file.go` on each changed file before
committing your changes. Most editors have plugins that do this automatically.
Pull request descriptions should be as clear as possible and include a
reference to all the issues that they address.
Pull requests must not contain commits from other users or branches.
Commit messages must start with a capitalized and short summary (max. 50
chars) written in the imperative, followed by an optional, more detailed
explanatory text which is separated from the summary by an empty line.
Code review comments may be added to your pull request. Discuss, then make the
suggested modifications and push additional commits to your feature branch. Be
sure to post a comment after pushing. The new commits will show up in the pull
request automatically, but the reviewers will not be notified unless you
comment.
Before the pull request is merged, make sure that you squash your commits into
logical units of work using `git rebase -i` and `git push -f`. After every
commit the test suite should be passing. Include documentation changes in the
same commit so that a revert would remove all traces of the feature or fix.
Commits that fix or close an issue should include a reference like `Closes #XXX`
or `Fixes #XXX`, which will automatically close the issue when merged.
### Sign your work
The sign-off is a simple line at the end of the explanation for the
patch, which certifies that you wrote it or otherwise have the right to
pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below (from
[developercertificate.org](http://developercertificate.org/)):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe@gmail.com>
using your real name (sorry, no pseudonyms or anonymous contributions).
You can add the sign off when creating the git commit via `git commit -s`.


@ -0,0 +1,7 @@
Michael Crosby <michael@docker.com> (@crosbymichael)
Rohit Jnagal <jnagal@google.com> (@rjnagal)
Victor Marmol <vmarmol@google.com> (@vmarmol)
Mrunal Patel <mpatel@redhat.com> (@mrunalp)
Alexander Morozov <lk4d4@docker.com> (@LK4D4)
Daniel, Dao Quang Minh <dqminh89@gmail.com> (@dqminh)
Andrey Vagin <avagin@virtuozzo.com> (@avagin)


@ -0,0 +1,120 @@
## Introduction
Dear maintainer. Thank you for investing the time and energy to help
make runc as useful as possible. Maintaining a project is difficult,
sometimes unrewarding work. Sure, you will get to contribute cool
features to the project. But most of your time will be spent reviewing,
cleaning up, documenting, answering questions, justifying design
decisions - while everyone has all the fun! But remember - the quality
of the maintainers' work is what distinguishes the good projects from the
great. So please be proud of your work, even the unglamorous parts,
and encourage a culture of appreciation and respect for *every* aspect
of improving the project - not just the hot new features.
This document is a manual for maintainers old and new. It explains what
is expected of maintainers, how they should work, and what tools are
available to them.
This is a living document - if you see something out of date or missing,
speak up!
## What are a maintainer's responsibilities?
It is every maintainer's responsibility to:
* 1) Expose a clear roadmap for improving their component.
* 2) Deliver prompt feedback and decisions on pull requests.
* 3) Be available to anyone with questions, bug reports, criticism etc.
on their component. This includes IRC and GitHub issues and pull requests.
* 4) Make sure their component respects the philosophy, design and
roadmap of the project.
## How are decisions made?
Short answer: with pull requests to the runc repository.
runc is an open-source project with an open design philosophy. This
means that the repository is the source of truth for EVERY aspect of the
project, including its philosophy, design, roadmap and APIs. *If it's
part of the project, it's in the repo. If it's in the repo, it's part of
the project.*
As a result, all decisions can be expressed as changes to the
repository. An implementation change is a change to the source code. An
API change is a change to the API specification. A philosophy change is
a change to the philosophy manifesto. And so on.
All decisions affecting runc, big and small, follow the same 3 steps:
* Step 1: Open a pull request. Anyone can do this.
* Step 2: Discuss the pull request. Anyone can do this.
* Step 3: Accept (`LGTM`) or refuse a pull request. The relevant maintainers do
this (see below "Who decides what?")
### I'm a maintainer, should I make pull requests too?
Yes. Nobody should ever push to master directly. All changes should be
made through a pull request.
## Who decides what?
All decisions are pull requests, and the relevant maintainers make
decisions by accepting or refusing the pull request. Review and acceptance
by anyone is denoted by adding a comment in the pull request: `LGTM`.
However, only currently listed `MAINTAINERS` are counted towards the required
two LGTMs.
Overall the maintainer system works because of mutual respect across the
maintainers of the project. The maintainers trust one another to make decisions
in the best interests of the project. Sometimes maintainers can disagree, and
that is part of a healthy project that represents the points of view of various people.
In the case where maintainers cannot find agreement on a specific change the
role of a Chief Maintainer comes into play.
The Chief Maintainer for the project is responsible for overall architecture
of the project to maintain conceptual integrity. Large decisions and
architecture changes should be reviewed by the chief maintainer.
The current chief maintainer for the project is Michael Crosby (@crosbymichael).
Even though the maintainer system is built on trust, if there is a conflict
with the chief maintainer on a decision, their decision can be challenged
and brought to the technical oversight board if two-thirds of the
maintainers vote for an appeal. It is expected that this would be a
very exceptional event.
### How are maintainers added?
The best maintainers have a vested interest in the project. Maintainers
are first and foremost contributors that have shown they are committed to
the long term success of the project. Contributors wanting to become
maintainers are expected to be deeply involved in contributing code,
pull request review, and triage of issues in the project for more than two months.
Just contributing does not make you a maintainer, it is about building trust
with the current maintainers of the project and being a person that they can
depend on and trust to make decisions in the best interest of the project. The
final vote to add a new maintainer should be approved by over 66% of the current
maintainers with the chief maintainer having veto power. In case of a veto,
conflict resolution rules expressed above apply. The voting period is
five business days on the Pull Request to add the new maintainer.
### What is expected of maintainers?
Part of a healthy project is to have active maintainers to support the community
in contributions and perform tasks to keep the project running. Maintainers are
expected to be able to respond in a timely manner if their help is required on specific
issues where they are pinged. Being a maintainer is a time-consuming commitment and should
not be taken lightly.
When a maintainer is unable to perform the required duties they can be removed with
a vote by 66% of the current maintainers with the chief maintainer having veto power.
The voting period is ten business days. Issues related to a maintainer's performance should
be discussed with them among the other maintainers so that they are not surprised by
a pull request removing them.


@ -0,0 +1,40 @@
RUNC_TEST_IMAGE=runc_test
PROJECT=github.com/opencontainers/runc
TEST_DOCKERFILE=script/test_Dockerfile
BUILDTAGS=seccomp
export GOPATH:=$(CURDIR)/Godeps/_workspace:$(GOPATH)
all:
go build -tags "$(BUILDTAGS)" -o runc .
static:
CGO_ENABLED=1 go build -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static" -o runc .
vet:
go get golang.org/x/tools/cmd/vet
lint: vet
go vet ./...
go fmt ./...
runctestimage:
docker build -t $(RUNC_TEST_IMAGE) -f $(TEST_DOCKERFILE) .
test: runctestimage
docker run -e TESTFLAGS --privileged --rm -v $(CURDIR):/go/src/$(PROJECT) $(RUNC_TEST_IMAGE) make localtest
localtest:
go test -tags "$(BUILDTAGS)" ${TESTFLAGS} -v ./...
install:
cp runc /usr/local/bin/runc
clean:
rm runc
validate: vet
script/validate-gofmt
go vet ./...
ci: validate localtest


@ -0,0 +1,17 @@
runc
Copyright 2012-2015 Docker, Inc.
This product includes software developed at Docker, Inc. (http://www.docker.com).
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see http://www.bis.doc.gov
See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.


@ -0,0 +1,19 @@
# runc principles
In the design and development of runc and libcontainer we try to follow these principles:
(Work in progress)
* Don't try to replace every tool. Instead, be an ingredient to improve them.
* Less code is better.
* Fewer components are better. Do you really need to add one more class?
* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand.
* Don't do later what you can do now. "//TODO: refactor" is not acceptable in new code.
* When hesitating between two options, choose the one that is easier to reverse.
* "No" is temporary; "Yes" is forever. If you're not sure about a new feature, say no. You can change your mind later.
* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable.
* The fewer moving parts in a container, the better.
* Don't merge it unless you document it.
* Don't document it unless you can keep it up-to-date.
* Don't merge it unless you test it!
* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that.


@ -0,0 +1,144 @@
[![Build Status](https://jenkins.dockerproject.org/buildStatus/icon?job=runc Master)](https://jenkins.dockerproject.org/job/runc Master)
## runc
`runc` is a CLI tool for spawning and running containers according to the OCF specification.
## State of the project
`runc` is an implementation of the OCI specification. We are currently sprinting
to get a v1 of the spec out, so the `runc` config format will keep changing until
the spec is finalized. However, we encourage you to try out the tool and give feedback.
### OCF
How does `runc` integrate with the Open Container Initiative Specification?
`runc` depends on the types specified in the
[specs](https://github.com/opencontainers/specs) repository. Whenever the
specification is updated and ready to be versioned, `runc` will update its dependency
on the specs repository and support the updated spec.
### Building:
At the time of writing, runc only builds on the Linux platform.
```bash
# create a 'github.com/opencontainers' directory in your GOPATH/src
cd github.com/opencontainers
git clone https://github.com/opencontainers/runc
cd runc
make
sudo make install
```
In order to enable seccomp support you will need to install libseccomp on your platform.
If you do not wish to build `runc` with seccomp support you can add `BUILDTAGS=""` when running make.
#### Build Tags
`runc` supports optional build tags for compiling in support for various features.
| Build Tag | Feature | Dependency |
|-----------|------------------------------------|-------------|
| seccomp | Syscall filtering | libseccomp |
| selinux | selinux process and mount labeling | <none> |
| apparmor | apparmor profile support | libapparmor |
### Testing:
You can run the tests for runC with the following command:
```bash
# make test
```
Note that the test cases are run in a Docker container, so you need to install
`docker` first. The tests also require cgroups to be mounted inside the container;
Docker handles this, so you need a Docker version newer than 1.8.0-rc2.
You can also run specific test cases by:
```bash
# make test TESTFLAGS="-run=SomeTestFunction"
```
### Using:
To run a container with the id "test", execute `runc start` with the container's id as the first argument
in the bundle's root directory:
```bash
runc start test
/ $ ps
PID USER COMMAND
1 daemon sh
5 daemon sh
/ $
```
### OCI Container JSON Format:
OCI container JSON format is based on OCI [specs](https://github.com/opencontainers/specs).
You can generate JSON files by using `runc spec`.
It assumes that the file-system is found in a directory called
`rootfs` and there is a user with uid and gid of `0` defined within that file-system.
### Examples:
#### Using a Docker image (requires version 1.3 or later)
To test using Docker's `busybox` image follow these steps:
* Install `docker` and download the `busybox` image: `docker pull busybox`
* Create a container from that image and export its contents to a tar file:
`docker export $(docker create busybox) > busybox.tar`
* Untar the contents to create your filesystem directory:
```
mkdir rootfs
tar -C rootfs -xf busybox.tar
```
* Create `config.json` by using `runc spec`.
* Execute `runc start` and you should be placed into a shell where you can run `ps`:
```
$ runc start test
/ # ps
PID USER COMMAND
1 root sh
9 root ps
```
#### Using runc with systemd
To use runc with systemd, you can create a unit file
`/usr/lib/systemd/system/minecraft.service` as below (edit the
Description, WorkingDirectory, or service name as needed).
```service
[Unit]
Description=Minecraft Build Server
Documentation=http://minecraft.net
After=network.target
[Service]
CPUQuota=200%
MemoryLimit=1536M
ExecStart=/usr/local/bin/runc start minecraft
Restart=on-failure
WorkingDirectory=/containers/minecraftbuild
[Install]
WantedBy=multi-user.target
```
Make sure you have the bundle's root directory and JSON configs in
your WorkingDirectory, then use systemd commands to start the service:
```bash
systemctl daemon-reload
systemctl start minecraft.service
```
Note that if you generated the JSON config with `runc spec`, you need to modify
`config.json` and set `process.terminal` to false so runc won't
create a tty, because a terminal cannot be attached to stdin when running as a
systemd service.


@ -0,0 +1,84 @@
// +build linux
package main
import (
"fmt"
"strconv"
"strings"
"github.com/codegangsta/cli"
"github.com/opencontainers/runc/libcontainer"
)
var checkpointCommand = cli.Command{
Name: "checkpoint",
Usage: "checkpoint a running container",
Flags: []cli.Flag{
cli.StringFlag{Name: "image-path", Value: "", Usage: "path for saving criu image files"},
cli.StringFlag{Name: "work-path", Value: "", Usage: "path for saving work files and logs"},
cli.BoolFlag{Name: "leave-running", Usage: "leave the process running after checkpointing"},
cli.BoolFlag{Name: "tcp-established", Usage: "allow open tcp connections"},
cli.BoolFlag{Name: "ext-unix-sk", Usage: "allow external unix sockets"},
cli.BoolFlag{Name: "shell-job", Usage: "allow shell jobs"},
cli.StringFlag{Name: "page-server", Value: "", Usage: "ADDRESS:PORT of the page server"},
cli.BoolFlag{Name: "file-locks", Usage: "handle file locks, for safety"},
cli.StringFlag{Name: "manage-cgroups-mode", Value: "", Usage: "cgroups mode: 'soft' (default), 'full' and 'strict'."},
},
Action: func(context *cli.Context) {
container, err := getContainer(context)
if err != nil {
fatal(err)
}
defer destroy(container)
options := criuOptions(context)
// these are the mandatory criu options for a container
setPageServer(context, options)
setManageCgroupsMode(context, options)
if err := container.Checkpoint(options); err != nil {
fatal(err)
}
},
}
func getCheckpointImagePath(context *cli.Context) string {
imagePath := context.String("image-path")
if imagePath == "" {
imagePath = getDefaultImagePath(context)
}
return imagePath
}
func setPageServer(context *cli.Context, options *libcontainer.CriuOpts) {
// xxx following criu opts are optional
// The dump image can be sent to a criu page server
if psOpt := context.String("page-server"); psOpt != "" {
addressPort := strings.Split(psOpt, ":")
if len(addressPort) != 2 {
fatal(fmt.Errorf("Use --page-server ADDRESS:PORT to specify page server"))
}
portInt, err := strconv.Atoi(addressPort[1])
if err != nil {
fatal(fmt.Errorf("Invalid port number"))
}
options.PageServer = libcontainer.CriuPageServerInfo{
Address: addressPort[0],
Port: int32(portInt),
}
}
}
func setManageCgroupsMode(context *cli.Context, options *libcontainer.CriuOpts) {
if cgOpt := context.String("manage-cgroups-mode"); cgOpt != "" {
switch cgOpt {
case "soft":
options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_SOFT
case "full":
options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_FULL
case "strict":
options.ManageCgroupsMode = libcontainer.CRIU_CG_MODE_STRICT
default:
fatal(fmt.Errorf("Invalid manage cgroups mode"))
}
}
}


@ -0,0 +1,15 @@
package main
import "github.com/codegangsta/cli"
var deleteCommand = cli.Command{
Name: "delete",
Usage: "delete any resources held by the container often used with detached containers",
Action: func(context *cli.Context) {
container, err := getContainer(context)
if err != nil {
fatal(err)
}
destroy(container)
},
}


@ -0,0 +1,95 @@
// +build linux
package main
import (
"encoding/json"
"os"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/opencontainers/runc/libcontainer"
)
// event struct for encoding the event data to json.
type event struct {
Type string `json:"type"`
ID string `json:"id"`
Data interface{} `json:"data,omitempty"`
}
var eventsCommand = cli.Command{
Name: "events",
Usage: "display container events such as OOM notifications, cpu, memory, IO and network stats",
Flags: []cli.Flag{
cli.DurationFlag{Name: "interval", Value: 5 * time.Second, Usage: "set the stats collection interval"},
cli.BoolFlag{Name: "stats", Usage: "display the container's stats then exit"},
},
Action: func(context *cli.Context) {
container, err := getContainer(context)
if err != nil {
logrus.Fatal(err)
}
var (
stats = make(chan *libcontainer.Stats, 1)
events = make(chan *event, 1024)
group = &sync.WaitGroup{}
)
group.Add(1)
go func() {
defer group.Done()
enc := json.NewEncoder(os.Stdout)
for e := range events {
if err := enc.Encode(e); err != nil {
logrus.Error(err)
}
}
}()
if context.Bool("stats") {
s, err := container.Stats()
if err != nil {
fatal(err)
}
events <- &event{Type: "stats", ID: container.ID(), Data: s}
close(events)
group.Wait()
return
}
go func() {
for range time.Tick(context.Duration("interval")) {
s, err := container.Stats()
if err != nil {
logrus.Error(err)
continue
}
stats <- s
}
}()
n, err := container.NotifyOOM()
if err != nil {
logrus.Fatal(err)
}
for {
select {
case _, ok := <-n:
if ok {
// this means an oom event was received, if it is !ok then
// the channel was closed because the container stopped and
// the cgroups no longer exist.
events <- &event{Type: "oom", ID: container.ID()}
} else {
n = nil
}
case s := <-stats:
events <- &event{Type: "stats", ID: container.ID(), Data: s}
}
if n == nil {
close(events)
break
}
}
group.Wait()
},
}


@ -0,0 +1,140 @@
// +build linux
package main
import (
"encoding/json"
"fmt"
"os"
"path"
"strconv"
"strings"
"github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
"github.com/opencontainers/specs"
)
var execCommand = cli.Command{
Name: "exec",
Usage: "execute new process inside the container",
Flags: []cli.Flag{
cli.StringFlag{
Name: "console",
Usage: "specify the pty slave path for use with the container",
},
cli.StringFlag{
Name: "cwd",
Usage: "current working directory in the container",
},
cli.StringSliceFlag{
Name: "env, e",
Usage: "set environment variables",
},
cli.BoolFlag{
Name: "tty, t",
Usage: "allocate a pseudo-TTY",
},
cli.StringFlag{
Name: "user, u",
Usage: "UID (format: <uid>[:<gid>])",
},
cli.StringFlag{
Name: "process,p",
Usage: "path to the process.json",
},
cli.BoolFlag{
Name: "detach,d",
Usage: "detach from the container's process",
},
cli.StringFlag{
Name: "pid-file",
Value: "",
Usage: "specify the file to write the process id to",
},
},
Action: func(context *cli.Context) {
if os.Geteuid() != 0 {
logrus.Fatal("runc should be run as root")
}
status, err := execProcess(context)
if err != nil {
logrus.Fatalf("exec failed: %v", err)
}
os.Exit(status)
},
}
func execProcess(context *cli.Context) (int, error) {
container, err := getContainer(context)
if err != nil {
return -1, err
}
var (
detach = context.Bool("detach")
rootfs = container.Config().Rootfs
)
p, err := getProcess(context, path.Dir(rootfs))
if err != nil {
return -1, err
}
return runProcess(container, p, nil, context.String("console"), context.String("pid-file"), detach)
}
func getProcess(context *cli.Context, bundle string) (*specs.Process, error) {
if path := context.String("process"); path != "" {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
var p specs.Process
if err := json.NewDecoder(f).Decode(&p); err != nil {
return nil, err
}
return &p, nil
}
// process via cli flags
if err := os.Chdir(bundle); err != nil {
return nil, err
}
spec, err := loadSpec(specConfig)
if err != nil {
return nil, err
}
p := spec.Process
p.Args = context.Args()[1:]
// override the cwd, if passed
if context.String("cwd") != "" {
p.Cwd = context.String("cwd")
}
// append the passed env variables
for _, e := range context.StringSlice("env") {
p.Env = append(p.Env, e)
}
// set the tty
if context.IsSet("tty") {
p.Terminal = context.Bool("tty")
}
// override the user, if passed
if context.String("user") != "" {
u := strings.SplitN(context.String("user"), ":", 2)
if len(u) > 1 {
gid, err := strconv.Atoi(u[1])
if err != nil {
return nil, fmt.Errorf("parsing %s as int for gid failed: %v", u[1], err)
}
p.User.GID = uint32(gid)
}
uid, err := strconv.Atoi(u[0])
if err != nil {
return nil, fmt.Errorf("parsing %s as int for uid failed: %v", u[0], err)
}
p.User.UID = uint32(uid)
}
return &p, nil
}


@ -0,0 +1,87 @@
// +build linux
package main
import (
"fmt"
"strconv"
"strings"
"syscall"
"github.com/codegangsta/cli"
)
var signalMap = map[string]syscall.Signal{
"ABRT": syscall.SIGABRT,
"ALRM": syscall.SIGALRM,
"BUS": syscall.SIGBUS,
"CHLD": syscall.SIGCHLD,
"CLD": syscall.SIGCLD,
"CONT": syscall.SIGCONT,
"FPE": syscall.SIGFPE,
"HUP": syscall.SIGHUP,
"ILL": syscall.SIGILL,
"INT": syscall.SIGINT,
"IO": syscall.SIGIO,
"IOT": syscall.SIGIOT,
"KILL": syscall.SIGKILL,
"PIPE": syscall.SIGPIPE,
"POLL": syscall.SIGPOLL,
"PROF": syscall.SIGPROF,
"PWR": syscall.SIGPWR,
"QUIT": syscall.SIGQUIT,
"SEGV": syscall.SIGSEGV,
"STKFLT": syscall.SIGSTKFLT,
"STOP": syscall.SIGSTOP,
"SYS": syscall.SIGSYS,
"TERM": syscall.SIGTERM,
"TRAP": syscall.SIGTRAP,
"TSTP": syscall.SIGTSTP,
"TTIN": syscall.SIGTTIN,
"TTOU": syscall.SIGTTOU,
"UNUSED": syscall.SIGUNUSED,
"URG": syscall.SIGURG,
"USR1": syscall.SIGUSR1,
"USR2": syscall.SIGUSR2,
"VTALRM": syscall.SIGVTALRM,
"WINCH": syscall.SIGWINCH,
"XCPU": syscall.SIGXCPU,
"XFSZ": syscall.SIGXFSZ,
}
var killCommand = cli.Command{
Name: "kill",
Usage: "kill sends the specified signal (default: SIGTERM) to the container's init process",
Action: func(context *cli.Context) {
container, err := getContainer(context)
if err != nil {
fatal(err)
}
sigstr := context.Args().Get(1)
if sigstr == "" {
sigstr = "SIGTERM"
}
signal, err := parseSignal(sigstr)
if err != nil {
fatal(err)
}
if err := container.Signal(signal); err != nil {
fatal(err)
}
},
}
func parseSignal(rawSignal string) (syscall.Signal, error) {
s, err := strconv.Atoi(rawSignal)
if err == nil {
return syscall.Signal(s), nil
}
signal, ok := signalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
if !ok {
return -1, fmt.Errorf("unknown signal %q", rawSignal)
}
return signal, nil
}


@ -10,80 +10,165 @@ host system and which is (optionally) isolated from other containers in the syst
#### Using libcontainer
To create a container you first have to initialize an instance of a factory
that will handle the creation and initialization for a container.
Because containers are spawned in a two step process you will need to provide
arguments to a binary that will be executed as the init process for the container.
To use the current binary that is spawning the containers and acting as the parent
you can use `os.Args[0]` and we have a command called `init` setup.
Because containers are spawned in a two step process you will need a binary that
will be executed as the init process for the container. In libcontainer, we use
the current binary (/proc/self/exe) as the init process, invoked with the
argument "init". We call this first step "bootstrap", so you always need an
"init" function as the entry point of the bootstrap.
```go
root, err := libcontainer.New("/var/lib/container", libcontainer.InitArgs(os.Args[0], "init"))
func init() {
if len(os.Args) > 1 && os.Args[1] == "init" {
runtime.GOMAXPROCS(1)
runtime.LockOSThread()
factory, _ := libcontainer.New("")
if err := factory.StartInitialization(); err != nil {
logrus.Fatal(err)
}
panic("--this line should have never been executed, congratulations--")
}
}
```
Then to create a container you first have to initialize an instance of a factory
that will handle the creation and initialization for a container.
```go
factory, err := libcontainer.New("/var/lib/container", libcontainer.Cgroupfs, libcontainer.InitArgs(os.Args[0], "init"))
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
return
}
```
Once you have an instance of the factory created we can create a configuration
struct describing how the container is to be created. A sample would look similar to this:
struct describing how the container is to be created. A sample would look similar to this:
```go
defaultMountFlags := syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
config := &configs.Config{
Rootfs: rootfs,
Capabilities: []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
},
Namespaces: configs.Namespaces([]configs.Namespace{
{Type: configs.NEWNS},
{Type: configs.NEWUTS},
{Type: configs.NEWIPC},
{Type: configs.NEWPID},
{Type: configs.NEWNET},
}),
Cgroups: &configs.Cgroup{
Name: "test-container",
Parent: "system",
AllowAllDevices: false,
AllowedDevices: configs.DefaultAllowedDevices,
},
Devices: configs.DefaultAutoCreatedDevices,
Hostname: "testing",
Networks: []*configs.Network{
{
Type: "loopback",
Address: "127.0.0.1/0",
Gateway: "localhost",
},
},
Rlimits: []configs.Rlimit{
{
Type: syscall.RLIMIT_NOFILE,
Hard: uint64(1024),
Soft: uint64(1024),
},
},
Rootfs: "/your/path/to/rootfs",
Capabilities: []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
},
Namespaces: configs.Namespaces([]configs.Namespace{
{Type: configs.NEWNS},
{Type: configs.NEWUTS},
{Type: configs.NEWIPC},
{Type: configs.NEWPID},
{Type: configs.NEWUSER},
{Type: configs.NEWNET},
}),
Cgroups: &configs.Cgroup{
Name: "test-container",
Parent: "system",
Resources: &configs.Resources{
MemorySwappiness: -1,
AllowAllDevices: false,
AllowedDevices: configs.DefaultAllowedDevices,
},
},
MaskPaths: []string{
"/proc/kcore",
},
ReadonlyPaths: []string{
"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
},
Devices: configs.DefaultAutoCreatedDevices,
Hostname: "testing",
Mounts: []*configs.Mount{
{
Source: "proc",
Destination: "/proc",
Device: "proc",
Flags: defaultMountFlags,
},
{
Source: "tmpfs",
Destination: "/dev",
Device: "tmpfs",
Flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME,
Data: "mode=755",
},
{
Source: "devpts",
Destination: "/dev/pts",
Device: "devpts",
Flags: syscall.MS_NOSUID | syscall.MS_NOEXEC,
Data: "newinstance,ptmxmode=0666,mode=0620,gid=5",
},
{
Device: "tmpfs",
Source: "shm",
Destination: "/dev/shm",
Data: "mode=1777,size=65536k",
Flags: defaultMountFlags,
},
{
Source: "mqueue",
Destination: "/dev/mqueue",
Device: "mqueue",
Flags: defaultMountFlags,
},
{
Source: "sysfs",
Destination: "/sys",
Device: "sysfs",
Flags: defaultMountFlags | syscall.MS_RDONLY,
},
},
UidMappings: []configs.IDMap{
{
ContainerID: 0,
HostID: 1000,
Size: 65536,
},
},
GidMappings: []configs.IDMap{
{
ContainerID: 0,
HostID: 1000,
Size: 65536,
},
},
Networks: []*configs.Network{
{
Type: "loopback",
Address: "127.0.0.1/0",
Gateway: "localhost",
},
},
Rlimits: []configs.Rlimit{
{
Type: syscall.RLIMIT_NOFILE,
Hard: uint64(1025),
Soft: uint64(1025),
},
},
}
```
Once you have the configuration populated you can create a container:
```go
container, err := root.Create("container-id", config)
container, err := factory.Create("container-id", config)
if err != nil {
logrus.Fatal(err)
return
}
```
To spawn bash as the initial process inside the container and have the
@ -91,23 +176,25 @@ processes pid returned in order to wait, signal, or kill the process:
```go
process := &libcontainer.Process{
Args: []string{"/bin/bash"},
Env: []string{"PATH=/bin"},
User: "daemon",
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
Args: []string{"/bin/bash"},
Env: []string{"PATH=/bin"},
User: "daemon",
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
}
err := container.Start(process)
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
container.Destroy()
return
}
// wait for the process to finish.
status, err := process.Wait()
_, err := process.Wait()
if err != nil {
log.Fatal(err)
logrus.Fatal(err)
}
// destroy the container.
@ -124,7 +211,6 @@ processes, err := container.Processes()
// it's processes.
stats, err := container.Stats()
// pause all processes inside the container.
container.Pause()


@ -142,6 +142,7 @@ system resources like cpu, memory, and device access.
| perf_event | 1 |
| freezer | 1 |
| hugetlb | 1 |
| pids | 1 |
All cgroup subsystem are joined so that statistics can be collected from
@ -199,7 +200,7 @@ provide a good default for security and flexibility for the applications.
| CAP_SYS_BOOT | 0 |
| CAP_LEASE | 0 |
| CAP_WAKE_ALARM | 0 |
| CAP_BLOCK_SUSPE | 0 |
| CAP_BLOCK_SUSPEND | 0 |
Additional security layers like [apparmor](https://wiki.ubuntu.com/AppArmor)


@ -15,6 +15,9 @@ type Manager interface {
// Returns the PIDs inside the cgroup set
GetPids() ([]int, error)
// Returns the PIDs inside the cgroup set & all sub-cgroups
GetAllPids() ([]int, error)
// Returns statistics for the cgroup set
GetStats() (*Stats, error)
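
The difference between the two accessors: `GetPids` returns only the tasks in the container's own cgroup, while the new `GetAllPids` also walks sub-cgroups. Below is a minimal sketch of using them through the fs manager; it assumes the `fs.Manager` and `configs.Cgroup` fields as they appear in this vendored tree and an already-applied cgroup named "test-container" under "system".

```go
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/cgroups/fs"
	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// Assumes the cgroup already exists (e.g. created via Manager.Apply).
	m := &fs.Manager{
		Cgroups: &configs.Cgroup{Name: "test-container", Parent: "system"},
	}

	direct, err := m.GetPids() // pids in the cgroup itself
	if err != nil {
		fmt.Println("GetPids:", err)
		return
	}
	all, err := m.GetAllPids() // pids in the cgroup and all of its sub-cgroups
	if err != nil {
		fmt.Println("GetAllPids:", err)
		return
	}
	fmt.Printf("direct=%v all=%v\n", direct, all)
}
```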


@ -0,0 +1,18 @@
// +build linux
package cgroups
import (
"testing"
)
func TestParseCgroups(t *testing.T) {
cgroups, err := ParseCgroupFile("/proc/self/cgroup")
if err != nil {
t.Fatal(err)
}
if _, ok := cgroups["cpu"]; !ok {
t.Fail()
}
}


@ -23,6 +23,7 @@ var (
&MemoryGroup{},
&CpuGroup{},
&CpuacctGroup{},
&PidsGroup{},
&BlkioGroup{},
&HugetlbGroup{},
&NetClsGroup{},
@ -93,11 +94,10 @@ func getCgroupRoot() (string, error) {
}
type cgroupData struct {
root string
parent string
name string
config *configs.Cgroup
pid int
root string
innerPath string
config *configs.Cgroup
pid int
}
func (m *Manager) Apply(pid int) (err error) {
@ -112,6 +112,22 @@ func (m *Manager) Apply(pid int) (err error) {
return err
}
if c.Paths != nil {
paths := make(map[string]string)
for name, path := range c.Paths {
_, err := d.path(name)
if err != nil {
if cgroups.IsNotFound(err) {
continue
}
return err
}
paths[name] = path
}
m.Paths = paths
return cgroups.EnterPid(m.Paths, pid)
}
paths := make(map[string]string)
defer func() {
if err != nil {
@ -135,17 +151,13 @@ func (m *Manager) Apply(pid int) (err error) {
paths[sys.Name()] = p
}
m.Paths = paths
if paths["cpu"] != "" {
if err := CheckCpushares(paths["cpu"], c.Resources.CpuShares); err != nil {
return err
}
}
return nil
}
func (m *Manager) Destroy() error {
if m.Cgroups.Paths != nil {
return nil
}
m.mu.Lock()
defer m.mu.Unlock()
if err := cgroups.RemovePaths(m.Paths); err != nil {
@ -179,15 +191,28 @@ func (m *Manager) GetStats() (*cgroups.Stats, error) {
}
func (m *Manager) Set(container *configs.Config) error {
for name, path := range m.Paths {
sys, err := subsystems.Get(name)
if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) {
continue
for _, sys := range subsystems {
// Generate fake cgroup data.
d, err := getCgroupData(container.Cgroups, -1)
if err != nil {
return err
}
// Get the path, but don't error out if the cgroup wasn't found.
path, err := d.path(sys.Name())
if err != nil && !cgroups.IsNotFound(err) {
return err
}
if err := sys.Set(path, container.Cgroups); err != nil {
return err
}
}
if m.Paths["cpu"] != "" {
if err := CheckCpushares(m.Paths["cpu"], container.Cgroups.Resources.CpuShares); err != nil {
return err
}
}
return nil
}
@ -217,31 +242,50 @@ func (m *Manager) Freeze(state configs.FreezerState) error {
}
func (m *Manager) GetPids() ([]int, error) {
d, err := getCgroupData(m.Cgroups, 0)
dir, err := getCgroupPath(m.Cgroups)
if err != nil {
return nil, err
}
dir, err := d.path("devices")
if err != nil {
return nil, err
}
return cgroups.GetPids(dir)
}
func (m *Manager) GetAllPids() ([]int, error) {
dir, err := getCgroupPath(m.Cgroups)
if err != nil {
return nil, err
}
return cgroups.GetAllPids(dir)
}
func getCgroupPath(c *configs.Cgroup) (string, error) {
d, err := getCgroupData(c, 0)
if err != nil {
return "", err
}
return d.path("devices")
}
func getCgroupData(c *configs.Cgroup, pid int) (*cgroupData, error) {
root, err := getCgroupRoot()
if err != nil {
return nil, err
}
if (c.Name != "" || c.Parent != "") && c.Path != "" {
return nil, fmt.Errorf("cgroup: either Path or Name and Parent should be used")
}
innerPath := c.Path
if innerPath == "" {
innerPath = filepath.Join(c.Parent, c.Name)
}
return &cgroupData{
root: root,
parent: c.Parent,
name: c.Name,
config: c,
pid: pid,
root: root,
innerPath: c.Path,
config: c,
pid: pid,
}, nil
}
@ -269,11 +313,10 @@ func (raw *cgroupData) path(subsystem string) (string, error) {
return "", err
}
cgPath := filepath.Join(raw.parent, raw.name)
// If the cgroup name/path is absolute, do not look relative to the cgroup of the init process.
if filepath.IsAbs(cgPath) {
if filepath.IsAbs(raw.innerPath) {
// Sometimes subsystems can be mounted together as 'cpu,cpuacct'.
return filepath.Join(raw.root, filepath.Base(mnt), cgPath), nil
return filepath.Join(raw.root, filepath.Base(mnt), raw.innerPath), nil
}
parentPath, err := raw.parentPath(subsystem, mnt, root)
@ -281,7 +324,7 @@ func (raw *cgroupData) path(subsystem string) (string, error) {
return "", err
}
return filepath.Join(parentPath, cgPath), nil
return filepath.Join(parentPath, raw.innerPath), nil
}
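// For example (an assumed layout, not taken from this repository): with
// root "/sys/fs/cgroup" and the subsystem mounted at
// "/sys/fs/cgroup/cpu,cpuacct", an absolute innerPath "/mycontainer"
// resolves to "/sys/fs/cgroup/cpu,cpuacct/mycontainer", while a relative
// innerPath such as "parent/mycontainer" is joined onto the parent path
// derived from the calling process's own cgroup.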
func (raw *cgroupData) join(subsystem string) (string, error) {

View file

@ -22,15 +22,10 @@ func (s *BlkioGroup) Name() string {
}
func (s *BlkioGroup) Apply(d *cgroupData) error {
dir, err := d.join("blkio")
_, err := d.join("blkio")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
if err := s.Set(dir, d.config); err != nil {
return err
}
return nil
}

View file

@ -0,0 +1,636 @@
// +build linux
package fs
import (
"strconv"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
const (
sectorsRecursiveContents = `8:0 1024`
serviceBytesRecursiveContents = `8:0 Read 100
8:0 Write 200
8:0 Sync 300
8:0 Async 500
8:0 Total 500
Total 500`
servicedRecursiveContents = `8:0 Read 10
8:0 Write 40
8:0 Sync 20
8:0 Async 30
8:0 Total 50
Total 50`
queuedRecursiveContents = `8:0 Read 1
8:0 Write 4
8:0 Sync 2
8:0 Async 3
8:0 Total 5
Total 5`
serviceTimeRecursiveContents = `8:0 Read 173959
8:0 Write 0
8:0 Sync 0
8:0 Async 173959
8:0 Total 17395
Total 17395`
waitTimeRecursiveContents = `8:0 Read 15571
8:0 Write 0
8:0 Sync 0
8:0 Async 15571
8:0 Total 15571`
mergedRecursiveContents = `8:0 Read 5
8:0 Write 10
8:0 Sync 0
8:0 Async 0
8:0 Total 15
Total 15`
timeRecursiveContents = `8:0 8`
throttleServiceBytes = `8:0 Read 11030528
8:0 Write 23
8:0 Sync 42
8:0 Async 11030528
8:0 Total 11030528
252:0 Read 11030528
252:0 Write 23
252:0 Sync 42
252:0 Async 11030528
252:0 Total 11030528
Total 22061056`
throttleServiced = `8:0 Read 164
8:0 Write 23
8:0 Sync 42
8:0 Async 164
8:0 Total 164
252:0 Read 164
252:0 Write 23
252:0 Sync 42
252:0 Async 164
252:0 Total 164
Total 328`
)
func appendBlkioStatEntry(blkioStatEntries *[]cgroups.BlkioStatEntry, major, minor, value uint64, op string) {
*blkioStatEntries = append(*blkioStatEntries, cgroups.BlkioStatEntry{Major: major, Minor: minor, Value: value, Op: op})
}
func TestBlkioSetWeight(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
const (
weightBefore = 100
weightAfter = 200
)
helper.writeFileContents(map[string]string{
"blkio.weight": strconv.Itoa(weightBefore),
})
helper.CgroupData.config.Resources.BlkioWeight = weightAfter
blkio := &BlkioGroup{}
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamUint(helper.CgroupPath, "blkio.weight")
if err != nil {
t.Fatalf("Failed to parse blkio.weight - %s", err)
}
if value != weightAfter {
t.Fatal("Got the wrong value, set blkio.weight failed.")
}
}
func TestBlkioSetWeightDevice(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
const (
weightDeviceBefore = "8:0 400"
)
wd := configs.NewWeightDevice(8, 0, 500, 0)
weightDeviceAfter := wd.WeightString()
helper.writeFileContents(map[string]string{
"blkio.weight_device": weightDeviceBefore,
})
helper.CgroupData.config.Resources.BlkioWeightDevice = []*configs.WeightDevice{wd}
blkio := &BlkioGroup{}
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamString(helper.CgroupPath, "blkio.weight_device")
if err != nil {
t.Fatalf("Failed to parse blkio.weight_device - %s", err)
}
if value != weightDeviceAfter {
t.Fatal("Got the wrong value, set blkio.weight_device failed.")
}
}
// regression #274
func TestBlkioSetMultipleWeightDevice(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
const (
weightDeviceBefore = "8:0 400"
)
wd1 := configs.NewWeightDevice(8, 0, 500, 0)
wd2 := configs.NewWeightDevice(8, 16, 500, 0)
// We cannot actually set and check both devices because a plain
// ioutil.WriteFile to a cgroup file overwrites the whole file content
// instead of appending to it the way the kernel does. Checking that the
// second device is present suffices to ensure multiple writes were done.
weightDeviceAfter := wd2.WeightString()
helper.writeFileContents(map[string]string{
"blkio.weight_device": weightDeviceBefore,
})
helper.CgroupData.config.Resources.BlkioWeightDevice = []*configs.WeightDevice{wd1, wd2}
blkio := &BlkioGroup{}
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamString(helper.CgroupPath, "blkio.weight_device")
if err != nil {
t.Fatalf("Failed to parse blkio.weight_device - %s", err)
}
if value != weightDeviceAfter {
t.Fatal("Got the wrong value, set blkio.weight_device failed.")
}
}
func TestBlkioStats(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
"blkio.io_merged_recursive": mergedRecursiveContents,
"blkio.time_recursive": timeRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
})
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatal(err)
}
// Verify expected stats.
expectedStats := cgroups.BlkioStats{}
appendBlkioStatEntry(&expectedStats.SectorsRecursive, 8, 0, 1024, "")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 100, "Read")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 200, "Write")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 300, "Sync")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Async")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Total")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 10, "Read")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 40, "Write")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 20, "Sync")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 30, "Async")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 50, "Total")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 1, "Read")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 4, "Write")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 2, "Sync")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Async")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Total")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Read")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Write")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Sync")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Async")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 17395, "Total")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Read")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Write")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Sync")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Async")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Total")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 5, "Read")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 10, "Write")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Sync")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Async")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 15, "Total")
appendBlkioStatEntry(&expectedStats.IoTimeRecursive, 8, 0, 8, "")
expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
}
func TestBlkioStatsNoSectorsFile(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
"blkio.io_merged_recursive": mergedRecursiveContents,
"blkio.time_recursive": timeRecursiveContents,
})
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatalf("Failed unexpectedly: %s", err)
}
}
func TestBlkioStatsNoServiceBytesFile(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
"blkio.io_merged_recursive": mergedRecursiveContents,
"blkio.time_recursive": timeRecursiveContents,
})
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatalf("Failed unexpectedly: %s", err)
}
}
func TestBlkioStatsNoServicedFile(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
"blkio.io_merged_recursive": mergedRecursiveContents,
"blkio.time_recursive": timeRecursiveContents,
})
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatalf("Failed unexpectedly: %s", err)
}
}
func TestBlkioStatsNoQueuedFile(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
"blkio.io_merged_recursive": mergedRecursiveContents,
"blkio.time_recursive": timeRecursiveContents,
})
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatalf("Failed unexpectedly: %s", err)
}
}
func TestBlkioStatsNoServiceTimeFile(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
"blkio.io_merged_recursive": mergedRecursiveContents,
"blkio.time_recursive": timeRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
})
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatalf("Failed unexpectedly: %s", err)
}
}
func TestBlkioStatsNoWaitTimeFile(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
"blkio.io_merged_recursive": mergedRecursiveContents,
"blkio.time_recursive": timeRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
})
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatalf("Failed unexpectedly: %s", err)
}
}
func TestBlkioStatsNoMergedFile(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
"blkio.time_recursive": timeRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
})
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatalf("Failed unexpectedly: %s", err)
}
}
func TestBlkioStatsNoTimeFile(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
"blkio.io_merged_recursive": mergedRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
})
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatalf("Failed unexpectedly: %s", err)
}
}
func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": "8:0 Read 100 100",
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
"blkio.io_merged_recursive": mergedRecursiveContents,
"blkio.time_recursive": timeRecursiveContents,
})
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(helper.CgroupPath, &actualStats)
if err == nil {
t.Fatal("Expected to fail, but did not")
}
}
func TestBlkioStatsUnexpectedFieldType(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": "8:0 Read Write",
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
"blkio.io_service_time_recursive": serviceTimeRecursiveContents,
"blkio.io_wait_time_recursive": waitTimeRecursiveContents,
"blkio.io_merged_recursive": mergedRecursiveContents,
"blkio.time_recursive": timeRecursiveContents,
})
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(helper.CgroupPath, &actualStats)
if err == nil {
t.Fatal("Expected to fail, but did not")
}
}
func TestNonCFQBlkioStats(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": "",
"blkio.io_serviced_recursive": "",
"blkio.io_queued_recursive": "",
"blkio.sectors_recursive": "",
"blkio.io_service_time_recursive": "",
"blkio.io_wait_time_recursive": "",
"blkio.io_merged_recursive": "",
"blkio.time_recursive": "",
"blkio.throttle.io_service_bytes": throttleServiceBytes,
"blkio.throttle.io_serviced": throttleServiced,
})
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatal(err)
}
// Verify expected stats.
expectedStats := cgroups.BlkioStats{}
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Read")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 23, "Write")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 42, "Sync")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Async")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Total")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Read")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 23, "Write")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 42, "Sync")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Async")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Total")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Read")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 23, "Write")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 42, "Sync")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Async")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Total")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Read")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 23, "Write")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 42, "Sync")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Async")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Total")
expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
}
func TestBlkioSetThrottleReadBpsDevice(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
const (
throttleBefore = `8:0 1024`
)
td := configs.NewThrottleDevice(8, 0, 2048)
throttleAfter := td.String()
helper.writeFileContents(map[string]string{
"blkio.throttle.read_bps_device": throttleBefore,
})
helper.CgroupData.config.Resources.BlkioThrottleReadBpsDevice = []*configs.ThrottleDevice{td}
blkio := &BlkioGroup{}
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.read_bps_device")
if err != nil {
t.Fatalf("Failed to parse blkio.throttle.read_bps_device - %s", err)
}
if value != throttleAfter {
t.Fatal("Got the wrong value, set blkio.throttle.read_bps_device failed.")
}
}
func TestBlkioSetThrottleWriteBpsDevice(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
const (
throttleBefore = `8:0 1024`
)
td := configs.NewThrottleDevice(8, 0, 2048)
throttleAfter := td.String()
helper.writeFileContents(map[string]string{
"blkio.throttle.write_bps_device": throttleBefore,
})
helper.CgroupData.config.Resources.BlkioThrottleWriteBpsDevice = []*configs.ThrottleDevice{td}
blkio := &BlkioGroup{}
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.write_bps_device")
if err != nil {
t.Fatalf("Failed to parse blkio.throttle.write_bps_device - %s", err)
}
if value != throttleAfter {
t.Fatal("Got the wrong value, set blkio.throttle.write_bps_device failed.")
}
}
func TestBlkioSetThrottleReadIOpsDevice(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
const (
throttleBefore = `8:0 1024`
)
td := configs.NewThrottleDevice(8, 0, 2048)
throttleAfter := td.String()
helper.writeFileContents(map[string]string{
"blkio.throttle.read_iops_device": throttleBefore,
})
helper.CgroupData.config.Resources.BlkioThrottleReadIOPSDevice = []*configs.ThrottleDevice{td}
blkio := &BlkioGroup{}
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.read_iops_device")
if err != nil {
t.Fatalf("Failed to parse blkio.throttle.read_iops_device - %s", err)
}
if value != throttleAfter {
t.Fatal("Got the wrong value, set blkio.throttle.read_iops_device failed.")
}
}
func TestBlkioSetThrottleWriteIOpsDevice(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
const (
throttleBefore = `8:0 1024`
)
td := configs.NewThrottleDevice(8, 0, 2048)
throttleAfter := td.String()
helper.writeFileContents(map[string]string{
"blkio.throttle.write_iops_device": throttleBefore,
})
helper.CgroupData.config.Resources.BlkioThrottleWriteIOPSDevice = []*configs.ThrottleDevice{td}
blkio := &BlkioGroup{}
if err := blkio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamString(helper.CgroupPath, "blkio.throttle.write_iops_device")
if err != nil {
t.Fatalf("Failed to parse blkio.throttle.write_iops_device - %s", err)
}
if value != throttleAfter {
t.Fatal("Got the wrong value, set blkio.throttle.write_iops_device failed.")
}
}

View file

@ -22,15 +22,10 @@ func (s *CpuGroup) Name() string {
func (s *CpuGroup) Apply(d *cgroupData) error {
// We always want to join the cpu group, to allow fair cpu scheduling
// on a container basis
dir, err := d.join("cpu")
_, err := d.join("cpu")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
if err := s.Set(dir, d.config); err != nil {
return err
}
return nil
}

View file

@ -0,0 +1,163 @@
// +build linux
package fs
import (
"fmt"
"strconv"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
func TestCpuSetShares(t *testing.T) {
helper := NewCgroupTestUtil("cpu", t)
defer helper.cleanup()
const (
sharesBefore = 1024
sharesAfter = 512
)
helper.writeFileContents(map[string]string{
"cpu.shares": strconv.Itoa(sharesBefore),
})
helper.CgroupData.config.Resources.CpuShares = sharesAfter
cpu := &CpuGroup{}
if err := cpu.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamUint(helper.CgroupPath, "cpu.shares")
if err != nil {
t.Fatalf("Failed to parse cpu.shares - %s", err)
}
if value != sharesAfter {
t.Fatal("Got the wrong value, set cpu.shares failed.")
}
}
func TestCpuSetBandWidth(t *testing.T) {
helper := NewCgroupTestUtil("cpu", t)
defer helper.cleanup()
const (
quotaBefore = 8000
quotaAfter = 5000
periodBefore = 10000
periodAfter = 7000
rtRuntimeBefore = 8000
rtRuntimeAfter = 5000
rtPeriodBefore = 10000
rtPeriodAfter = 7000
)
helper.writeFileContents(map[string]string{
"cpu.cfs_quota_us": strconv.Itoa(quotaBefore),
"cpu.cfs_period_us": strconv.Itoa(periodBefore),
"cpu.rt_runtime_us": strconv.Itoa(rtRuntimeBefore),
"cpu.rt_period_us": strconv.Itoa(rtPeriodBefore),
})
helper.CgroupData.config.Resources.CpuQuota = quotaAfter
helper.CgroupData.config.Resources.CpuPeriod = periodAfter
helper.CgroupData.config.Resources.CpuRtRuntime = rtRuntimeAfter
helper.CgroupData.config.Resources.CpuRtPeriod = rtPeriodAfter
cpu := &CpuGroup{}
if err := cpu.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
quota, err := getCgroupParamUint(helper.CgroupPath, "cpu.cfs_quota_us")
if err != nil {
t.Fatalf("Failed to parse cpu.cfs_quota_us - %s", err)
}
if quota != quotaAfter {
t.Fatal("Got the wrong value, set cpu.cfs_quota_us failed.")
}
period, err := getCgroupParamUint(helper.CgroupPath, "cpu.cfs_period_us")
if err != nil {
t.Fatalf("Failed to parse cpu.cfs_period_us - %s", err)
}
if period != periodAfter {
t.Fatal("Got the wrong value, set cpu.cfs_period_us failed.")
}
rtRuntime, err := getCgroupParamUint(helper.CgroupPath, "cpu.rt_runtime_us")
if err != nil {
t.Fatalf("Failed to parse cpu.rt_runtime_us - %s", err)
}
if rtRuntime != rtRuntimeAfter {
t.Fatal("Got the wrong value, set cpu.rt_runtime_us failed.")
}
rtPeriod, err := getCgroupParamUint(helper.CgroupPath, "cpu.rt_period_us")
if err != nil {
t.Fatalf("Failed to parse cpu.rt_period_us - %s", err)
}
if rtPeriod != rtPeriodAfter {
t.Fatal("Got the wrong value, set cpu.rt_period_us failed.")
}
}
func TestCpuStats(t *testing.T) {
helper := NewCgroupTestUtil("cpu", t)
defer helper.cleanup()
const (
kNrPeriods = 2000
kNrThrottled = 200
kThrottledTime = uint64(18446744073709551615)
)
cpuStatContent := fmt.Sprintf("nr_periods %d\n nr_throttled %d\n throttled_time %d\n",
kNrPeriods, kNrThrottled, kThrottledTime)
helper.writeFileContents(map[string]string{
"cpu.stat": cpuStatContent,
})
cpu := &CpuGroup{}
actualStats := *cgroups.NewStats()
err := cpu.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatal(err)
}
expectedStats := cgroups.ThrottlingData{
Periods: kNrPeriods,
ThrottledPeriods: kNrThrottled,
ThrottledTime: kThrottledTime}
expectThrottlingDataEquals(t, expectedStats, actualStats.CpuStats.ThrottlingData)
}
func TestNoCpuStatFile(t *testing.T) {
helper := NewCgroupTestUtil("cpu", t)
defer helper.cleanup()
cpu := &CpuGroup{}
actualStats := *cgroups.NewStats()
err := cpu.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatal("Expected not to fail, but did")
}
}
func TestInvalidCpuStat(t *testing.T) {
helper := NewCgroupTestUtil("cpu", t)
defer helper.cleanup()
cpuStatContent := `nr_periods 2000
nr_throttled 200
throttled_time fortytwo`
helper.writeFileContents(map[string]string{
"cpu.stat": cpuStatContent,
})
cpu := &CpuGroup{}
actualStats := *cgroups.NewStats()
err := cpu.GetStats(helper.CgroupPath, &actualStats)
if err == nil {
t.Fatal("Expected failed stat parsing.")
}
}

View file

@ -4,6 +4,7 @@ package fs
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
@ -11,6 +12,7 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
)
type CpusetGroup struct {
@ -63,11 +65,6 @@ func (s *CpusetGroup) ApplyDir(dir string, cgroup *configs.Cgroup, pid int) erro
if err := s.ensureParent(dir, root); err != nil {
return err
}
// the default values inherit from parent cgroup are already set in
// s.ensureParent, cover these if we have our own
if err := s.Set(dir, cgroup); err != nil {
return err
}
// Because we are not using d.join, we need to place the pid into the
// procs file, unlike the other subsystems.
if err := writeFile(dir, "cgroup.procs", strconv.Itoa(pid)); err != nil {
@ -92,9 +89,13 @@ func (s *CpusetGroup) getSubsystemSettings(parent string) (cpus []byte, mems []b
// its parent.
func (s *CpusetGroup) ensureParent(current, root string) error {
parent := filepath.Dir(current)
if filepath.Clean(parent) == root {
if libcontainerUtils.CleanPath(parent) == root {
return nil
}
// Avoid infinite recursion.
if parent == current {
return fmt.Errorf("cpuset: cgroup parent path outside cgroup root")
}
if err := s.ensureParent(parent, root); err != nil {
return err
}

View file

@ -0,0 +1,65 @@
// +build linux
package fs
import (
"testing"
)
func TestCpusetSetCpus(t *testing.T) {
helper := NewCgroupTestUtil("cpuset", t)
defer helper.cleanup()
const (
cpusBefore = "0"
cpusAfter = "1-3"
)
helper.writeFileContents(map[string]string{
"cpuset.cpus": cpusBefore,
})
helper.CgroupData.config.Resources.CpusetCpus = cpusAfter
cpuset := &CpusetGroup{}
if err := cpuset.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamString(helper.CgroupPath, "cpuset.cpus")
if err != nil {
t.Fatalf("Failed to parse cpuset.cpus - %s", err)
}
if value != cpusAfter {
t.Fatal("Got the wrong value, set cpuset.cpus failed.")
}
}
func TestCpusetSetMems(t *testing.T) {
helper := NewCgroupTestUtil("cpuset", t)
defer helper.cleanup()
const (
memsBefore = "0"
memsAfter = "1"
)
helper.writeFileContents(map[string]string{
"cpuset.mems": memsBefore,
})
helper.CgroupData.config.Resources.CpusetMems = memsAfter
cpuset := &CpusetGroup{}
if err := cpuset.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamString(helper.CgroupPath, "cpuset.mems")
if err != nil {
t.Fatalf("Failed to parse cpuset.mems - %s", err)
}
if value != memsAfter {
t.Fatal("Got the wrong value, set cpuset.mems failed.")
}
}

View file

@ -15,21 +15,29 @@ func (s *DevicesGroup) Name() string {
}
func (s *DevicesGroup) Apply(d *cgroupData) error {
dir, err := d.join("devices")
_, err := d.join("devices")
if err != nil {
// We will return an error even if it is a `not found` error, because the
// devices cgroup is a hard requirement for the container's security.
return err
}
if err := s.Set(dir, d.config); err != nil {
return err
}
return nil
}
func (s *DevicesGroup) Set(path string, cgroup *configs.Cgroup) error {
devices := cgroup.Resources.Devices
if len(devices) > 0 {
for _, dev := range devices {
file := "devices.deny"
if dev.Allow {
file = "devices.allow"
}
if err := writeFile(path, file, dev.CgroupString()); err != nil {
return err
}
}
return nil
}
if !cgroup.Resources.AllowAllDevices {
if err := writeFile(path, "devices.deny", "a"); err != nil {
return err

View file

@ -0,0 +1,84 @@
// +build linux
package fs
import (
"testing"
"github.com/opencontainers/runc/libcontainer/configs"
)
var (
allowedDevices = []*configs.Device{
{
Path: "/dev/zero",
Type: 'c',
Major: 1,
Minor: 5,
Permissions: "rwm",
FileMode: 0666,
},
}
allowedList = "c 1:5 rwm"
deniedDevices = []*configs.Device{
{
Path: "/dev/null",
Type: 'c',
Major: 1,
Minor: 3,
Permissions: "rwm",
FileMode: 0666,
},
}
deniedList = "c 1:3 rwm"
)
func TestDevicesSetAllow(t *testing.T) {
helper := NewCgroupTestUtil("devices", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"devices.deny": "a",
})
helper.CgroupData.config.Resources.AllowAllDevices = false
helper.CgroupData.config.Resources.AllowedDevices = allowedDevices
devices := &DevicesGroup{}
if err := devices.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamString(helper.CgroupPath, "devices.allow")
if err != nil {
t.Fatalf("Failed to parse devices.allow - %s", err)
}
if value != allowedList {
t.Fatal("Got the wrong value, set devices.allow failed.")
}
}
func TestDevicesSetDeny(t *testing.T) {
helper := NewCgroupTestUtil("devices", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"devices.allow": "a",
})
helper.CgroupData.config.Resources.AllowAllDevices = true
helper.CgroupData.config.Resources.DeniedDevices = deniedDevices
devices := &DevicesGroup{}
if err := devices.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamString(helper.CgroupPath, "devices.deny")
if err != nil {
t.Fatalf("Failed to parse devices.deny - %s", err)
}
if value != deniedList {
t.Fatal("Got the wrong value, set devices.deny failed.")
}
}

View file

@ -19,15 +19,10 @@ func (s *FreezerGroup) Name() string {
}
func (s *FreezerGroup) Apply(d *cgroupData) error {
dir, err := d.join("freezer")
_, err := d.join("freezer")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
if err := s.Set(dir, d.config); err != nil {
return err
}
return nil
}

View file

@ -0,0 +1,47 @@
// +build linux
package fs
import (
"testing"
"github.com/opencontainers/runc/libcontainer/configs"
)
func TestFreezerSetState(t *testing.T) {
helper := NewCgroupTestUtil("freezer", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"freezer.state": string(configs.Frozen),
})
helper.CgroupData.config.Resources.Freezer = configs.Thawed
freezer := &FreezerGroup{}
if err := freezer.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamString(helper.CgroupPath, "freezer.state")
if err != nil {
t.Fatalf("Failed to parse freezer.state - %s", err)
}
if value != string(configs.Thawed) {
t.Fatal("Got the wrong value, set freezer.state failed.")
}
}
func TestFreezerSetInvalidState(t *testing.T) {
helper := NewCgroupTestUtil("freezer", t)
defer helper.cleanup()
const (
invalidArg configs.FreezerState = "Invalid"
)
helper.CgroupData.config.Resources.Freezer = invalidArg
freezer := &FreezerGroup{}
if err := freezer.Set(helper.CgroupPath, helper.CgroupData.config); err == nil {
t.Fatal("Failed to return invalid argument error")
}
}

View file

@ -19,15 +19,10 @@ func (s *HugetlbGroup) Name() string {
}
func (s *HugetlbGroup) Apply(d *cgroupData) error {
dir, err := d.join("hugetlb")
_, err := d.join("hugetlb")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
if err := s.Set(dir, d.config); err != nil {
return err
}
return nil
}

View file

@ -0,0 +1,154 @@
// +build linux
package fs
import (
"fmt"
"strconv"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
const (
hugetlbUsageContents = "128\n"
hugetlbMaxUsageContents = "256\n"
hugetlbFailcnt = "100\n"
)
var (
usage = "hugetlb.%s.usage_in_bytes"
limit = "hugetlb.%s.limit_in_bytes"
maxUsage = "hugetlb.%s.max_usage_in_bytes"
failcnt = "hugetlb.%s.failcnt"
)
func TestHugetlbSetHugetlb(t *testing.T) {
helper := NewCgroupTestUtil("hugetlb", t)
defer helper.cleanup()
const (
hugetlbBefore = 256
hugetlbAfter = 512
)
for _, pageSize := range HugePageSizes {
helper.writeFileContents(map[string]string{
fmt.Sprintf(limit, pageSize): strconv.Itoa(hugetlbBefore),
})
}
for _, pageSize := range HugePageSizes {
helper.CgroupData.config.Resources.HugetlbLimit = []*configs.HugepageLimit{
{
Pagesize: pageSize,
Limit: hugetlbAfter,
},
}
hugetlb := &HugetlbGroup{}
if err := hugetlb.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
}
for _, pageSize := range HugePageSizes {
limit := fmt.Sprintf(limit, pageSize)
value, err := getCgroupParamUint(helper.CgroupPath, limit)
if err != nil {
t.Fatalf("Failed to parse %s - %s", limit, err)
}
if value != hugetlbAfter {
t.Fatalf("Set hugetlb.limit_in_bytes failed. Expected: %v, Got: %v", hugetlbAfter, value)
}
}
}
func TestHugetlbStats(t *testing.T) {
helper := NewCgroupTestUtil("hugetlb", t)
defer helper.cleanup()
for _, pageSize := range HugePageSizes {
helper.writeFileContents(map[string]string{
fmt.Sprintf(usage, pageSize): hugetlbUsageContents,
fmt.Sprintf(maxUsage, pageSize): hugetlbMaxUsageContents,
fmt.Sprintf(failcnt, pageSize): hugetlbFailcnt,
})
}
hugetlb := &HugetlbGroup{}
actualStats := *cgroups.NewStats()
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatal(err)
}
expectedStats := cgroups.HugetlbStats{Usage: 128, MaxUsage: 256, Failcnt: 100}
for _, pageSize := range HugePageSizes {
expectHugetlbStatEquals(t, expectedStats, actualStats.HugetlbStats[pageSize])
}
}
func TestHugetlbStatsNoUsageFile(t *testing.T) {
helper := NewCgroupTestUtil("hugetlb", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
maxUsage: hugetlbMaxUsageContents,
})
hugetlb := &HugetlbGroup{}
actualStats := *cgroups.NewStats()
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
if err == nil {
t.Fatal("Expected failure")
}
}
func TestHugetlbStatsNoMaxUsageFile(t *testing.T) {
helper := NewCgroupTestUtil("hugetlb", t)
defer helper.cleanup()
for _, pageSize := range HugePageSizes {
helper.writeFileContents(map[string]string{
fmt.Sprintf(usage, pageSize): hugetlbUsageContents,
})
}
hugetlb := &HugetlbGroup{}
actualStats := *cgroups.NewStats()
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
if err == nil {
t.Fatal("Expected failure")
}
}
func TestHugetlbStatsBadUsageFile(t *testing.T) {
helper := NewCgroupTestUtil("hugetlb", t)
defer helper.cleanup()
for _, pageSize := range HugePageSizes {
helper.writeFileContents(map[string]string{
fmt.Sprintf(usage, pageSize): "bad",
maxUsage: hugetlbMaxUsageContents,
})
}
hugetlb := &HugetlbGroup{}
actualStats := *cgroups.NewStats()
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
if err == nil {
t.Fatal("Expected failure")
}
}
func TestHugetlbStatsBadMaxUsageFile(t *testing.T) {
helper := NewCgroupTestUtil("hugetlb", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
usage: hugetlbUsageContents,
maxUsage: "bad",
})
hugetlb := &HugetlbGroup{}
actualStats := *cgroups.NewStats()
err := hugetlb.GetStats(helper.CgroupPath, &actualStats)
if err == nil {
t.Fatal("Expected failure")
}
}

View file

@ -32,8 +32,9 @@ func (s *MemoryGroup) Apply(d *cgroupData) (err error) {
return err
}
}
if err := s.Set(path, d.config); err != nil {
// We have to set kernel memory here, as we can't change it once
// processes have been attached.
if err := s.SetKernelMemory(path, d.config); err != nil {
return err
}
}
@ -50,7 +51,17 @@ func (s *MemoryGroup) Apply(d *cgroupData) (err error) {
if err != nil && !cgroups.IsNotFound(err) {
return err
}
return nil
}
func (s *MemoryGroup) SetKernelMemory(path string, cgroup *configs.Cgroup) error {
// This has to be done separately because it has special constraints (it
// can't be done after there are processes attached to the cgroup).
if cgroup.Resources.KernelMemory > 0 {
if err := writeFile(path, "memory.kmem.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemory, 10)); err != nil {
return err
}
}
return nil
}
@ -70,12 +81,6 @@ func (s *MemoryGroup) Set(path string, cgroup *configs.Cgroup) error {
return err
}
}
if cgroup.Resources.KernelMemory > 0 {
if err := writeFile(path, "memory.kmem.limit_in_bytes", strconv.FormatInt(cgroup.Resources.KernelMemory, 10)); err != nil {
return err
}
}
if cgroup.Resources.OomKillDisable {
if err := writeFile(path, "memory.oom_control", "1"); err != nil {
return err
@ -157,6 +162,7 @@ func getMemoryData(path, name string) (cgroups.MemoryData, error) {
usage := strings.Join([]string{moduleName, "usage_in_bytes"}, ".")
maxUsage := strings.Join([]string{moduleName, "max_usage_in_bytes"}, ".")
failcnt := strings.Join([]string{moduleName, "failcnt"}, ".")
limit := strings.Join([]string{moduleName, "limit_in_bytes"}, ".")
value, err := getCgroupParamUint(path, usage)
if err != nil {
@ -182,6 +188,14 @@ func getMemoryData(path, name string) (cgroups.MemoryData, error) {
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", failcnt, err)
}
memoryData.Failcnt = value
value, err = getCgroupParamUint(path, limit)
if err != nil {
if moduleName != "memory" && os.IsNotExist(err) {
return cgroups.MemoryData{}, nil
}
return cgroups.MemoryData{}, fmt.Errorf("failed to parse %s - %v", limit, err)
}
memoryData.Limit = value
return memoryData, nil
}

View file

@ -0,0 +1,339 @@
// +build linux
package fs
import (
"strconv"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
const (
memoryStatContents = `cache 512
rss 1024`
memoryUsageContents = "2048\n"
memoryMaxUsageContents = "4096\n"
memoryFailcnt = "100\n"
memoryLimitContents = "8192\n"
)
func TestMemorySetMemory(t *testing.T) {
helper := NewCgroupTestUtil("memory", t)
defer helper.cleanup()
const (
memoryBefore = 314572800 // 300M
memoryAfter = 524288000 // 500M
reservationBefore = 209715200 // 200M
reservationAfter = 314572800 // 300M
)
helper.writeFileContents(map[string]string{
"memory.limit_in_bytes": strconv.Itoa(memoryBefore),
"memory.soft_limit_in_bytes": strconv.Itoa(reservationBefore),
})
helper.CgroupData.config.Resources.Memory = memoryAfter
helper.CgroupData.config.Resources.MemoryReservation = reservationAfter
memory := &MemoryGroup{}
if err := memory.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamUint(helper.CgroupPath, "memory.limit_in_bytes")
if err != nil {
t.Fatalf("Failed to parse memory.limit_in_bytes - %s", err)
}
if value != memoryAfter {
t.Fatal("Got the wrong value, set memory.limit_in_bytes failed.")
}
value, err = getCgroupParamUint(helper.CgroupPath, "memory.soft_limit_in_bytes")
if err != nil {
t.Fatalf("Failed to parse memory.soft_limit_in_bytes - %s", err)
}
if value != reservationAfter {
t.Fatal("Got the wrong value, set memory.soft_limit_in_bytes failed.")
}
}
func TestMemorySetMemoryswap(t *testing.T) {
helper := NewCgroupTestUtil("memory", t)
defer helper.cleanup()
const (
memoryswapBefore = 314572800 // 300M
memoryswapAfter = 524288000 // 500M
)
helper.writeFileContents(map[string]string{
"memory.memsw.limit_in_bytes": strconv.Itoa(memoryswapBefore),
})
helper.CgroupData.config.Resources.MemorySwap = memoryswapAfter
memory := &MemoryGroup{}
if err := memory.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamUint(helper.CgroupPath, "memory.memsw.limit_in_bytes")
if err != nil {
t.Fatalf("Failed to parse memory.memsw.limit_in_bytes - %s", err)
}
if value != memoryswapAfter {
t.Fatal("Got the wrong value, set memory.memsw.limit_in_bytes failed.")
}
}
func TestMemorySetKernelMemory(t *testing.T) {
helper := NewCgroupTestUtil("memory", t)
defer helper.cleanup()
const (
kernelMemoryBefore = 314572800 // 300M
kernelMemoryAfter = 524288000 // 500M
)
helper.writeFileContents(map[string]string{
"memory.kmem.limit_in_bytes": strconv.Itoa(kernelMemoryBefore),
})
helper.CgroupData.config.Resources.KernelMemory = kernelMemoryAfter
memory := &MemoryGroup{}
if err := memory.SetKernelMemory(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamUint(helper.CgroupPath, "memory.kmem.limit_in_bytes")
if err != nil {
t.Fatalf("Failed to parse memory.kmem.limit_in_bytes - %s", err)
}
if value != kernelMemoryAfter {
t.Fatal("Got the wrong value, set memory.kmem.limit_in_bytes failed.")
}
}
func TestMemorySetMemorySwappinessDefault(t *testing.T) {
helper := NewCgroupTestUtil("memory", t)
defer helper.cleanup()
const (
swappinessBefore = 60 // default is 60
swappinessAfter = 0
)
helper.writeFileContents(map[string]string{
"memory.swappiness": strconv.Itoa(swappinessBefore),
})
helper.CgroupData.config.Resources.Memory = swappinessAfter
memory := &MemoryGroup{}
if err := memory.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamUint(helper.CgroupPath, "memory.swappiness")
if err != nil {
t.Fatalf("Failed to parse memory.swappiness - %s", err)
}
if value != swappinessAfter {
t.Fatal("Got the wrong value, set memory.swappiness failed.")
}
}
func TestMemoryStats(t *testing.T) {
helper := NewCgroupTestUtil("memory", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"memory.stat": memoryStatContents,
"memory.usage_in_bytes": memoryUsageContents,
"memory.limit_in_bytes": memoryLimitContents,
"memory.max_usage_in_bytes": memoryMaxUsageContents,
"memory.failcnt": memoryFailcnt,
"memory.memsw.usage_in_bytes": memoryUsageContents,
"memory.memsw.max_usage_in_bytes": memoryMaxUsageContents,
"memory.memsw.failcnt": memoryFailcnt,
"memory.memsw.limit_in_bytes": memoryLimitContents,
"memory.kmem.usage_in_bytes": memoryUsageContents,
"memory.kmem.max_usage_in_bytes": memoryMaxUsageContents,
"memory.kmem.failcnt": memoryFailcnt,
"memory.kmem.limit_in_bytes": memoryLimitContents,
})
memory := &MemoryGroup{}
actualStats := *cgroups.NewStats()
err := memory.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatal(err)
}
expectedStats := cgroups.MemoryStats{Cache: 512, Usage: cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Limit: 8192}, SwapUsage: cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Limit: 8192}, KernelUsage: cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Limit: 8192}, Stats: map[string]uint64{"cache": 512, "rss": 1024}}
expectMemoryStatEquals(t, expectedStats, actualStats.MemoryStats)
}
func TestMemoryStatsNoStatFile(t *testing.T) {
helper := NewCgroupTestUtil("memory", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"memory.usage_in_bytes": memoryUsageContents,
"memory.max_usage_in_bytes": memoryMaxUsageContents,
"memory.limit_in_bytes": memoryLimitContents,
})
memory := &MemoryGroup{}
actualStats := *cgroups.NewStats()
err := memory.GetStats(helper.CgroupPath, &actualStats)
if err != nil {
t.Fatal(err)
}
}
func TestMemoryStatsNoUsageFile(t *testing.T) {
helper := NewCgroupTestUtil("memory", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"memory.stat": memoryStatContents,
"memory.max_usage_in_bytes": memoryMaxUsageContents,
"memory.limit_in_bytes": memoryLimitContents,
})
memory := &MemoryGroup{}
actualStats := *cgroups.NewStats()
err := memory.GetStats(helper.CgroupPath, &actualStats)
if err == nil {
t.Fatal("Expected failure")
}
}
func TestMemoryStatsNoMaxUsageFile(t *testing.T) {
helper := NewCgroupTestUtil("memory", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"memory.stat": memoryStatContents,
"memory.usage_in_bytes": memoryUsageContents,
"memory.limit_in_bytes": memoryLimitContents,
})
memory := &MemoryGroup{}
actualStats := *cgroups.NewStats()
err := memory.GetStats(helper.CgroupPath, &actualStats)
if err == nil {
t.Fatal("Expected failure")
}
}
func TestMemoryStatsNoLimitInBytesFile(t *testing.T) {
helper := NewCgroupTestUtil("memory", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"memory.stat": memoryStatContents,
"memory.usage_in_bytes": memoryUsageContents,
"memory.max_usage_in_bytes": memoryMaxUsageContents,
})
memory := &MemoryGroup{}
actualStats := *cgroups.NewStats()
err := memory.GetStats(helper.CgroupPath, &actualStats)
if err == nil {
t.Fatal("Expected failure")
}
}
func TestMemoryStatsBadStatFile(t *testing.T) {
helper := NewCgroupTestUtil("memory", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"memory.stat": "rss rss",
"memory.usage_in_bytes": memoryUsageContents,
"memory.max_usage_in_bytes": memoryMaxUsageContents,
"memory.limit_in_bytes": memoryLimitContents,
})
memory := &MemoryGroup{}
actualStats := *cgroups.NewStats()
err := memory.GetStats(helper.CgroupPath, &actualStats)
if err == nil {
t.Fatal("Expected failure")
}
}
func TestMemoryStatsBadUsageFile(t *testing.T) {
helper := NewCgroupTestUtil("memory", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"memory.stat": memoryStatContents,
"memory.usage_in_bytes": "bad",
"memory.max_usage_in_bytes": memoryMaxUsageContents,
"memory.limit_in_bytes": memoryLimitContents,
})
memory := &MemoryGroup{}
actualStats := *cgroups.NewStats()
err := memory.GetStats(helper.CgroupPath, &actualStats)
if err == nil {
t.Fatal("Expected failure")
}
}
func TestMemoryStatsBadMaxUsageFile(t *testing.T) {
helper := NewCgroupTestUtil("memory", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"memory.stat": memoryStatContents,
"memory.usage_in_bytes": memoryUsageContents,
"memory.max_usage_in_bytes": "bad",
"memory.limit_in_bytes": memoryLimitContents,
})
memory := &MemoryGroup{}
actualStats := *cgroups.NewStats()
err := memory.GetStats(helper.CgroupPath, &actualStats)
if err == nil {
t.Fatal("Expected failure")
}
}
func TestMemoryStatsBadLimitInBytesFile(t *testing.T) {
helper := NewCgroupTestUtil("memory", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"memory.stat": memoryStatContents,
"memory.usage_in_bytes": memoryUsageContents,
"memory.max_usage_in_bytes": memoryMaxUsageContents,
"memory.limit_in_bytes": "bad",
})
memory := &MemoryGroup{}
actualStats := *cgroups.NewStats()
err := memory.GetStats(helper.CgroupPath, &actualStats)
if err == nil {
t.Fatal("Expected failure")
}
}
func TestMemorySetOomControl(t *testing.T) {
helper := NewCgroupTestUtil("memory", t)
defer helper.cleanup()
const (
oom_kill_disable = 1 // disable oom killer, default is 0
)
helper.writeFileContents(map[string]string{
"memory.oom_control": strconv.Itoa(oom_kill_disable),
})
memory := &MemoryGroup{}
if err := memory.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamUint(helper.CgroupPath, "memory.oom_control")
if err != nil {
t.Fatalf("Failed to parse memory.oom_control - %s", err)
}
if value != oom_kill_disable {
t.Fatalf("Got the wrong value, set memory.oom_control failed.")
}
}

View file

@ -15,15 +15,10 @@ func (s *NetClsGroup) Name() string {
}
func (s *NetClsGroup) Apply(d *cgroupData) error {
dir, err := d.join("net_cls")
_, err := d.join("net_cls")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
if err := s.Set(dir, d.config); err != nil {
return err
}
return nil
}

View file

@ -0,0 +1,38 @@
// +build linux
package fs
import (
"testing"
)
const (
classidBefore = "0x100002"
classidAfter = "0x100001"
)
func TestNetClsSetClassid(t *testing.T) {
helper := NewCgroupTestUtil("net_cls", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"net_cls.classid": classidBefore,
})
helper.CgroupData.config.Resources.NetClsClassid = classidAfter
netcls := &NetClsGroup{}
if err := netcls.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
// As we are in a mock environment, we can't get the correct value of classid
// from net_cls.classid, so we just check whether the classid was written to
// the file successfully.
value, err := getCgroupParamString(helper.CgroupPath, "net_cls.classid")
if err != nil {
t.Fatalf("Failed to parse net_cls.classid - %s", err)
}
if value != classidAfter {
t.Fatal("Got the wrong value, set net_cls.classid failed.")
}
}

View file

@ -15,15 +15,10 @@ func (s *NetPrioGroup) Name() string {
}
func (s *NetPrioGroup) Apply(d *cgroupData) error {
dir, err := d.join("net_prio")
_, err := d.join("net_prio")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
if err := s.Set(dir, d.config); err != nil {
return err
}
return nil
}

View file

@ -0,0 +1,38 @@
// +build linux
package fs
import (
"strings"
"testing"
"github.com/opencontainers/runc/libcontainer/configs"
)
var (
prioMap = []*configs.IfPrioMap{
{
Interface: "test",
Priority: 5,
},
}
)
func TestNetPrioSetIfPrio(t *testing.T) {
helper := NewCgroupTestUtil("net_prio", t)
defer helper.cleanup()
helper.CgroupData.config.Resources.NetPrioIfpriomap = prioMap
netPrio := &NetPrioGroup{}
if err := netPrio.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamString(helper.CgroupPath, "net_prio.ifpriomap")
if err != nil {
t.Fatalf("Failed to parse net_prio.ifpriomap - %s", err)
}
if !strings.Contains(value, "test 5") {
t.Fatal("Got the wrong value, set net_prio.ifpriomap failed.")
}
}

View file

@ -0,0 +1,57 @@
// +build linux
package fs
import (
"fmt"
"strconv"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
type PidsGroup struct {
}
func (s *PidsGroup) Name() string {
return "pids"
}
func (s *PidsGroup) Apply(d *cgroupData) error {
_, err := d.join("pids")
if err != nil && !cgroups.IsNotFound(err) {
return err
}
return nil
}
func (s *PidsGroup) Set(path string, cgroup *configs.Cgroup) error {
if cgroup.Resources.PidsLimit != 0 {
// "max" is the fallback value.
limit := "max"
if cgroup.Resources.PidsLimit > 0 {
limit = strconv.FormatInt(cgroup.Resources.PidsLimit, 10)
}
if err := writeFile(path, "pids.max", limit); err != nil {
return err
}
}
return nil
}
func (s *PidsGroup) Remove(d *cgroupData) error {
return removePath(d.path("pids"))
}
func (s *PidsGroup) GetStats(path string, stats *cgroups.Stats) error {
value, err := getCgroupParamUint(path, "pids.current")
if err != nil {
return fmt.Errorf("failed to parse pids.current - %s", err)
}
stats.PidsStats.Current = value
return nil
}
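// A sketch of the resulting writes for a few PidsLimit values (illustrative
// only, derived from Set above):
//	PidsLimit: 1024 -> "1024" is written to pids.max
//	PidsLimit: -1   -> "max" is written to pids.max (unlimited)
//	PidsLimit: 0    -> pids.max is left untouched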

View file

@ -0,0 +1,83 @@
// +build linux
package fs
import (
"strconv"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
const (
maxUnlimited = -1
maxLimited = 1024
)
func TestPidsSetMax(t *testing.T) {
helper := NewCgroupTestUtil("pids", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"pids.max": "max",
})
helper.CgroupData.config.Resources.PidsLimit = maxLimited
pids := &PidsGroup{}
if err := pids.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamUint(helper.CgroupPath, "pids.max")
if err != nil {
t.Fatalf("Failed to parse pids.max - %s", err)
}
if value != maxLimited {
t.Fatalf("Expected %d, got %d for setting pids.max - limited", maxLimited, value)
}
}
func TestPidsSetUnlimited(t *testing.T) {
helper := NewCgroupTestUtil("pids", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"pids.max": strconv.Itoa(maxLimited),
})
helper.CgroupData.config.Resources.PidsLimit = maxUnlimited
pids := &PidsGroup{}
if err := pids.Set(helper.CgroupPath, helper.CgroupData.config); err != nil {
t.Fatal(err)
}
value, err := getCgroupParamString(helper.CgroupPath, "pids.max")
if err != nil {
t.Fatalf("Failed to parse pids.max - %s", err)
}
if value != "max" {
t.Fatalf("Expected %s, got %s for setting pids.max - unlimited", "max", value)
}
}
func TestPidsStats(t *testing.T) {
helper := NewCgroupTestUtil("pids", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"pids.current": strconv.Itoa(1337),
"pids.max": strconv.Itoa(maxLimited),
})
pids := &PidsGroup{}
stats := *cgroups.NewStats()
if err := pids.GetStats(helper.CgroupPath, &stats); err != nil {
t.Fatal(err)
}
if stats.PidsStats.Current != 1337 {
t.Fatalf("Expected %d, got %d for pids.current", 1337, stats.PidsStats.Current)
}
}

View file

@ -0,0 +1,117 @@
// +build linux
package fs
import (
"fmt"
"testing"
"github.com/Sirupsen/logrus"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
func blkioStatEntryEquals(expected, actual []cgroups.BlkioStatEntry) error {
if len(expected) != len(actual) {
return fmt.Errorf("blkioStatEntries length do not match")
}
for i, expValue := range expected {
actValue := actual[i]
if expValue != actValue {
return fmt.Errorf("Expected blkio stat entry %v but found %v", expValue, actValue)
}
}
return nil
}
func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) {
if err := blkioStatEntryEquals(expected.IoServiceBytesRecursive, actual.IoServiceBytesRecursive); err != nil {
logrus.Printf("blkio IoServiceBytesRecursive do not match - %s\n", err)
t.Fail()
}
if err := blkioStatEntryEquals(expected.IoServicedRecursive, actual.IoServicedRecursive); err != nil {
logrus.Printf("blkio IoServicedRecursive do not match - %s\n", err)
t.Fail()
}
if err := blkioStatEntryEquals(expected.IoQueuedRecursive, actual.IoQueuedRecursive); err != nil {
logrus.Printf("blkio IoQueuedRecursive do not match - %s\n", err)
t.Fail()
}
if err := blkioStatEntryEquals(expected.SectorsRecursive, actual.SectorsRecursive); err != nil {
logrus.Printf("blkio SectorsRecursive do not match - %s\n", err)
t.Fail()
}
if err := blkioStatEntryEquals(expected.IoServiceTimeRecursive, actual.IoServiceTimeRecursive); err != nil {
logrus.Printf("blkio IoServiceTimeRecursive do not match - %s\n", err)
t.Fail()
}
if err := blkioStatEntryEquals(expected.IoWaitTimeRecursive, actual.IoWaitTimeRecursive); err != nil {
logrus.Printf("blkio IoWaitTimeRecursive do not match - %s\n", err)
t.Fail()
}
if err := blkioStatEntryEquals(expected.IoMergedRecursive, actual.IoMergedRecursive); err != nil {
logrus.Printf("blkio IoMergedRecursive do not match - %v vs %v\n", expected.IoMergedRecursive, actual.IoMergedRecursive)
t.Fail()
}
if err := blkioStatEntryEquals(expected.IoTimeRecursive, actual.IoTimeRecursive); err != nil {
logrus.Printf("blkio IoTimeRecursive do not match - %s\n", err)
t.Fail()
}
}
func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) {
if expected != actual {
logrus.Printf("Expected throttling data %v but found %v\n", expected, actual)
t.Fail()
}
}
func expectHugetlbStatEquals(t *testing.T, expected, actual cgroups.HugetlbStats) {
if expected != actual {
logrus.Printf("Expected hugetlb stats %v but found %v\n", expected, actual)
t.Fail()
}
}
func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) {
expectMemoryDataEquals(t, expected.Usage, actual.Usage)
expectMemoryDataEquals(t, expected.SwapUsage, actual.SwapUsage)
expectMemoryDataEquals(t, expected.KernelUsage, actual.KernelUsage)
for key, expValue := range expected.Stats {
actValue, ok := actual.Stats[key]
if !ok {
logrus.Printf("Expected memory stat key %s not found\n", key)
t.Fail()
}
if expValue != actValue {
logrus.Printf("Expected memory stat value %d but found %d\n", expValue, actValue)
t.Fail()
}
}
}
func expectMemoryDataEquals(t *testing.T, expected, actual cgroups.MemoryData) {
if expected.Usage != actual.Usage {
logrus.Printf("Expected memory usage %d but found %d\n", expected.Usage, actual.Usage)
t.Fail()
}
if expected.MaxUsage != actual.MaxUsage {
logrus.Printf("Expected memory max usage %d but found %d\n", expected.MaxUsage, actual.MaxUsage)
t.Fail()
}
if expected.Failcnt != actual.Failcnt {
logrus.Printf("Expected memory failcnt %d but found %d\n", expected.Failcnt, actual.Failcnt)
t.Fail()
}
if expected.Limit != actual.Limit {
logrus.Printf("Expected memory limit %d but found %d\n", expected.Limit, actual.Limit)
t.Fail()
}
}

View file

@ -0,0 +1,67 @@
// +build linux
/*
Utility for testing cgroup operations.
Creates a mock of the cgroup filesystem for the duration of the test.
*/
package fs
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/opencontainers/runc/libcontainer/configs"
)
type cgroupTestUtil struct {
// cgroup data to use in tests.
CgroupData *cgroupData
// Path to the mock cgroup directory.
CgroupPath string
// Temporary directory to store mock cgroup filesystem.
tempDir string
t *testing.T
}
// Creates a new test util for the specified subsystem
func NewCgroupTestUtil(subsystem string, t *testing.T) *cgroupTestUtil {
d := &cgroupData{
config: &configs.Cgroup{},
}
d.config.Resources = &configs.Resources{}
tempDir, err := ioutil.TempDir("", "cgroup_test")
if err != nil {
t.Fatal(err)
}
d.root = tempDir
testCgroupPath := filepath.Join(d.root, subsystem)
if err != nil {
t.Fatal(err)
}
// Ensure the full mock cgroup path exists.
err = os.MkdirAll(testCgroupPath, 0755)
if err != nil {
t.Fatal(err)
}
return &cgroupTestUtil{CgroupData: d, CgroupPath: testCgroupPath, tempDir: tempDir, t: t}
}
func (c *cgroupTestUtil) cleanup() {
os.RemoveAll(c.tempDir)
}
// Write the specified contents on the mock of the specified cgroup files.
func (c *cgroupTestUtil) writeFileContents(fileContents map[string]string) {
for file, contents := range fileContents {
err := writeFile(c.CgroupPath, file, contents)
if err != nil {
c.t.Fatal(err)
}
}
}

View file

@ -0,0 +1,97 @@
// +build linux
package fs
import (
"io/ioutil"
"math"
"os"
"path/filepath"
"strconv"
"testing"
)
const (
cgroupFile = "cgroup.file"
floatValue = 2048.0
floatString = "2048"
)
func TestGetCgroupParamsInt(t *testing.T) {
// Setup tempdir.
tempDir, err := ioutil.TempDir("", "cgroup_utils_test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tempDir)
tempFile := filepath.Join(tempDir, cgroupFile)
// Success.
err = ioutil.WriteFile(tempFile, []byte(floatString), 0755)
if err != nil {
t.Fatal(err)
}
value, err := getCgroupParamUint(tempDir, cgroupFile)
if err != nil {
t.Fatal(err)
} else if value != floatValue {
t.Fatalf("Expected %d to equal %f", value, floatValue)
}
// Success with new line.
err = ioutil.WriteFile(tempFile, []byte(floatString+"\n"), 0755)
if err != nil {
t.Fatal(err)
}
value, err = getCgroupParamUint(tempDir, cgroupFile)
if err != nil {
t.Fatal(err)
} else if value != floatValue {
t.Fatalf("Expected %d to equal %f", value, floatValue)
}
// Success with negative values
err = ioutil.WriteFile(tempFile, []byte("-12345"), 0755)
if err != nil {
t.Fatal(err)
}
value, err = getCgroupParamUint(tempDir, cgroupFile)
if err != nil {
t.Fatal(err)
} else if value != 0 {
t.Fatalf("Expected %d to equal %d", value, 0)
}
// Success with negative values lesser than min int64
s := strconv.FormatFloat(math.MinInt64, 'f', -1, 64)
err = ioutil.WriteFile(tempFile, []byte(s), 0755)
if err != nil {
t.Fatal(err)
}
value, err = getCgroupParamUint(tempDir, cgroupFile)
if err != nil {
t.Fatal(err)
} else if value != 0 {
t.Fatalf("Expected %d to equal %d", value, 0)
}
// Not a float.
err = ioutil.WriteFile(tempFile, []byte("not-a-float"), 0755)
if err != nil {
t.Fatal(err)
}
_, err = getCgroupParamUint(tempDir, cgroupFile)
if err == nil {
t.Fatal("Expecting error, got none")
}
// Unknown file.
err = os.Remove(tempFile)
if err != nil {
t.Fatal(err)
}
_, err = getCgroupParamUint(tempDir, cgroupFile)
if err == nil {
t.Fatal("Expecting error, got none")
}
}
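
Taken together, these cases imply that the helper reads the file, trims trailing whitespace, saturates negative values at zero, and errors on anything non-numeric or missing. A minimal sketch under those assumptions (not the vendored getCgroupParamUint; the name getCgroupParamUintSketch is hypothetical):

package sketch

import (
    "io/ioutil"
    "path/filepath"
    "strconv"
    "strings"
)

func getCgroupParamUintSketch(cgroupPath, file string) (uint64, error) {
    contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, file))
    if err != nil {
        return 0, err
    }
    trimmed := strings.TrimSpace(string(contents))
    // Negative values, however large, saturate at zero rather than failing.
    if v, err := strconv.ParseInt(trimmed, 10, 64); err == nil && v < 0 {
        return 0, nil
    }
    return strconv.ParseUint(trimmed, 10, 64)
}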

View file

@ -36,7 +36,9 @@ type MemoryData struct {
Usage uint64 `json:"usage,omitempty"`
MaxUsage uint64 `json:"max_usage,omitempty"`
Failcnt uint64 `json:"failcnt"`
Limit uint64 `json:"limit"`
}
type MemoryStats struct {
// memory used for cache
Cache uint64 `json:"cache,omitempty"`
@ -49,6 +51,11 @@ type MemoryStats struct {
Stats map[string]uint64 `json:"stats,omitempty"`
}
type PidsStats struct {
// number of pids in the cgroup
Current uint64 `json:"current,omitempty"`
}
type BlkioStatEntry struct {
Major uint64 `json:"major,omitempty"`
Minor uint64 `json:"minor,omitempty"`
@ -80,6 +87,7 @@ type HugetlbStats struct {
type Stats struct {
CpuStats CpuStats `json:"cpu_stats,omitempty"`
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
PidsStats PidsStats `json:"pids_stats,omitempty"`
BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
// the map is in the format "size of hugepage: stats of the hugepage"
HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"`
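
Serialised, the new counter rides along with the existing stats; an illustrative round-trip (values invented):

s := cgroups.NewStats()
s.PidsStats.Current = 3
b, _ := json.Marshal(s)
// b now contains ..."pids_stats":{"current":3}...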

View file

@ -26,6 +26,10 @@ func (m *Manager) GetPids() ([]int, error) {
return nil, fmt.Errorf("Systemd not supported")
}
func (m *Manager) GetAllPids() ([]int, error) {
return nil, fmt.Errorf("Systemd not supported")
}
func (m *Manager) Destroy() error {
return fmt.Errorf("Systemd not supported")
}

View file

@ -55,6 +55,7 @@ var subsystems = subsystemSet{
&fs.MemoryGroup{},
&fs.CpuGroup{},
&fs.CpuacctGroup{},
&fs.PidsGroup{},
&fs.BlkioGroup{},
&fs.HugetlbGroup{},
&fs.PerfEventGroup{},
@ -167,6 +168,23 @@ func (m *Manager) Apply(pid int) error {
properties []systemdDbus.Property
)
if c.Paths != nil {
paths := make(map[string]string)
for name, path := range c.Paths {
_, err := getSubsystemPath(m.Cgroups, name)
if err != nil {
// Don't fail if a cgroup hierarchy was not found, just skip this subsystem
if cgroups.IsNotFound(err) {
continue
}
return err
}
paths[name] = path
}
m.Paths = paths
return cgroups.EnterPid(m.Paths, pid)
}
if c.Parent != "" {
slice = c.Parent
}
@ -233,7 +251,7 @@ func (m *Manager) Apply(pid int) error {
return err
}
// we need to manually join the freezer, net_cls, net_prio and cpuset cgroup in systemd
// we need to manually join the freezer, net_cls, net_prio, pids and cpuset cgroup in systemd
// because it does not currently support it via the dbus api.
if err := joinFreezer(c, pid); err != nil {
return err
@ -246,6 +264,10 @@ func (m *Manager) Apply(pid int) error {
return err
}
if err := joinPids(c, pid); err != nil {
return err
}
if err := joinCpuset(c, pid); err != nil {
return err
}
@ -277,17 +299,13 @@ func (m *Manager) Apply(pid int) error {
paths[s.Name()] = subsystemPath
}
m.Paths = paths
if paths["cpu"] != "" {
if err := fs.CheckCpushares(paths["cpu"], c.Resources.CpuShares); err != nil {
return err
}
}
return nil
}
func (m *Manager) Destroy() error {
if m.Cgroups.Paths != nil {
return nil
}
m.mu.Lock()
defer m.mu.Unlock()
theConn.StopUnit(getUnitName(m.Cgroups), "replace", nil)
@ -330,68 +348,74 @@ func join(c *configs.Cgroup, subsystem string, pid int) (string, error) {
}
func joinCpu(c *configs.Cgroup, pid int) error {
path, err := getSubsystemPath(c, "cpu")
_, err := join(c, "cpu", pid)
if err != nil && !cgroups.IsNotFound(err) {
return err
}
if c.Resources.CpuQuota != 0 {
if err = writeFile(path, "cpu.cfs_quota_us", strconv.FormatInt(c.Resources.CpuQuota, 10)); err != nil {
return err
}
}
if c.Resources.CpuPeriod != 0 {
if err = writeFile(path, "cpu.cfs_period_us", strconv.FormatInt(c.Resources.CpuPeriod, 10)); err != nil {
return err
}
}
if c.Resources.CpuRtPeriod != 0 {
if err = writeFile(path, "cpu.rt_period_us", strconv.FormatInt(c.Resources.CpuRtPeriod, 10)); err != nil {
return err
}
}
if c.Resources.CpuRtRuntime != 0 {
if err = writeFile(path, "cpu.rt_runtime_us", strconv.FormatInt(c.Resources.CpuRtRuntime, 10)); err != nil {
return err
}
}
return nil
}
func joinFreezer(c *configs.Cgroup, pid int) error {
path, err := join(c, "freezer", pid)
_, err := join(c, "freezer", pid)
if err != nil && !cgroups.IsNotFound(err) {
return err
}
freezer, err := subsystems.Get("freezer")
if err != nil {
return err
}
return freezer.Set(path, c)
return nil
}
func joinNetPrio(c *configs.Cgroup, pid int) error {
path, err := join(c, "net_prio", pid)
_, err := join(c, "net_prio", pid)
if err != nil && !cgroups.IsNotFound(err) {
return err
}
netPrio, err := subsystems.Get("net_prio")
if err != nil {
return err
}
return netPrio.Set(path, c)
return nil
}
func joinNetCls(c *configs.Cgroup, pid int) error {
path, err := join(c, "net_cls", pid)
_, err := join(c, "net_cls", pid)
if err != nil && !cgroups.IsNotFound(err) {
return err
}
netcls, err := subsystems.Get("net_cls")
if err != nil {
return nil
}
func joinPids(c *configs.Cgroup, pid int) error {
_, err := join(c, "pids", pid)
if err != nil && !cgroups.IsNotFound(err) {
return err
}
return netcls.Set(path, c)
return nil
}
// systemd represents slice hierarchy using `-`, so we need to follow suit when
// generating the path of slice. Essentially, test-a-b.slice becomes
// test.slice/test-a.slice/test-a-b.slice.
func expandSlice(slice string) (string, error) {
suffix := ".slice"
// Name has to end with ".slice", but can't be just ".slice".
if len(slice) < len(suffix) || !strings.HasSuffix(slice, suffix) {
return "", fmt.Errorf("invalid slice name: %s", slice)
}
// Path-separators are not allowed.
if strings.Contains(slice, "/") {
return "", fmt.Errorf("invalid slice name: %s", slice)
}
var path, prefix string
sliceName := strings.TrimSuffix(slice, suffix)
for _, component := range strings.Split(sliceName, "-") {
// test--a.slice isn't permitted, nor is -test.slice.
if component == "" {
return "", fmt.Errorf("invalid slice name: %s", slice)
}
// Append the component to the path and to the prefix.
path += prefix + component + suffix + "/"
prefix += component + "-"
}
return path, nil
}
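
A quick illustration of the expansion rule spelled out in the comment above (hypothetical calls against the expandSlice just defined):

path, err := expandSlice("test-a-b.slice")
// err == nil, path == "test.slice/test-a.slice/test-a-b.slice/"
_, err = expandSlice("test--a.slice") // rejected: empty component in the name
_, err = expandSlice("foo/bar.slice") // rejected: path separators are not allowed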
func getSubsystemPath(c *configs.Cgroup, subsystem string) (string, error) {
@ -410,6 +434,11 @@ func getSubsystemPath(c *configs.Cgroup, subsystem string) (string, error) {
slice = c.Parent
}
slice, err = expandSlice(slice)
if err != nil {
return "", err
}
return filepath.Join(mountpoint, initPath, slice, getUnitName(c)), nil
}
@ -440,6 +469,14 @@ func (m *Manager) GetPids() ([]int, error) {
return cgroups.GetPids(path)
}
func (m *Manager) GetAllPids() ([]int, error) {
path, err := getSubsystemPath(m.Cgroups, "devices")
if err != nil {
return nil, err
}
return cgroups.GetAllPids(path)
}
func (m *Manager) GetStats() (*cgroups.Stats, error) {
m.mu.Lock()
defer m.mu.Unlock()
@ -458,16 +495,23 @@ func (m *Manager) GetStats() (*cgroups.Stats, error) {
}
func (m *Manager) Set(container *configs.Config) error {
for name, path := range m.Paths {
sys, err := subsystems.Get(name)
if err == errSubsystemDoesNotExist || !cgroups.PathExists(path) {
continue
for _, sys := range subsystems {
// Get the subsystem path, but don't error out for not found cgroups.
path, err := getSubsystemPath(container.Cgroups, sys.Name())
if err != nil && !cgroups.IsNotFound(err) {
return err
}
if err := sys.Set(path, container.Cgroups); err != nil {
return err
}
}
if m.Paths["cpu"] != "" {
if err := fs.CheckCpushares(m.Paths["cpu"], container.Cgroups.Resources.CpuShares); err != nil {
return err
}
}
return nil
}
@ -487,17 +531,13 @@ func getUnitName(c *configs.Cgroup) string {
// because systemd will re-write the device settings if it needs to re-apply the cgroup context.
// This happens at least for v208 when any sibling unit is started.
func joinDevices(c *configs.Cgroup, pid int) error {
path, err := join(c, "devices", pid)
_, err := join(c, "devices", pid)
// Even if it's `not found` error, we'll return err because devices cgroup
// is hard requirement for container security.
if err != nil {
return err
}
devices, err := subsystems.Get("devices")
if err != nil {
return err
}
return devices.Set(path, c)
return nil
}
func setKernelMemory(c *configs.Cgroup) error {
@ -510,52 +550,16 @@ func setKernelMemory(c *configs.Cgroup) error {
return err
}
if c.Resources.KernelMemory > 0 {
err = writeFile(path, "memory.kmem.limit_in_bytes", strconv.FormatInt(c.Resources.KernelMemory, 10))
if err != nil {
return err
}
}
return nil
// This doesn't get called by manager.Set, so we need to do it here.
s := &fs.MemoryGroup{}
return s.SetKernelMemory(path, c)
}
func joinMemory(c *configs.Cgroup, pid int) error {
path, err := getSubsystemPath(c, "memory")
_, err := join(c, "memory", pid)
if err != nil && !cgroups.IsNotFound(err) {
return err
}
// -1 disables memoryswap
if c.Resources.MemorySwap > 0 {
err = writeFile(path, "memory.memsw.limit_in_bytes", strconv.FormatInt(c.Resources.MemorySwap, 10))
if err != nil {
return err
}
}
if c.Resources.MemoryReservation > 0 {
err = writeFile(path, "memory.soft_limit_in_bytes", strconv.FormatInt(c.Resources.MemoryReservation, 10))
if err != nil {
return err
}
}
if c.Resources.OomKillDisable {
if err := writeFile(path, "memory.oom_control", "1"); err != nil {
return err
}
}
if c.Resources.MemorySwappiness >= 0 && c.Resources.MemorySwappiness <= 100 {
err = writeFile(path, "memory.swappiness", strconv.FormatInt(c.Resources.MemorySwappiness, 10))
if err != nil {
return err
}
} else if c.Resources.MemorySwappiness == -1 {
return nil
} else {
return fmt.Errorf("invalid value:%d. valid memory swappiness range is 0-100", c.Resources.MemorySwappiness)
}
return nil
}
@ -577,68 +581,25 @@ func joinCpuset(c *configs.Cgroup, pid int) error {
// expects device path instead of major minor numbers, which is also confusing
// for users. So we use fs work around for now.
func joinBlkio(c *configs.Cgroup, pid int) error {
path, err := getSubsystemPath(c, "blkio")
_, err := join(c, "blkio", pid)
if err != nil {
return err
}
// systemd doesn't directly support this in the dbus properties
if c.Resources.BlkioLeafWeight != 0 {
if err := writeFile(path, "blkio.leaf_weight", strconv.FormatUint(uint64(c.Resources.BlkioLeafWeight), 10)); err != nil {
return err
}
}
for _, wd := range c.Resources.BlkioWeightDevice {
if err := writeFile(path, "blkio.weight_device", wd.WeightString()); err != nil {
return err
}
if err := writeFile(path, "blkio.leaf_weight_device", wd.LeafWeightString()); err != nil {
return err
}
}
for _, td := range c.Resources.BlkioThrottleReadBpsDevice {
if err := writeFile(path, "blkio.throttle.read_bps_device", td.String()); err != nil {
return err
}
}
for _, td := range c.Resources.BlkioThrottleWriteBpsDevice {
if err := writeFile(path, "blkio.throttle.write_bps_device", td.String()); err != nil {
return err
}
}
for _, td := range c.Resources.BlkioThrottleReadIOPSDevice {
if err := writeFile(path, "blkio.throttle.read_iops_device", td.String()); err != nil {
return err
}
}
for _, td := range c.Resources.BlkioThrottleWriteIOPSDevice {
if err := writeFile(path, "blkio.throttle.write_iops_device", td.String()); err != nil {
return err
}
}
return nil
}
func joinHugetlb(c *configs.Cgroup, pid int) error {
path, err := join(c, "hugetlb", pid)
_, err := join(c, "hugetlb", pid)
if err != nil && !cgroups.IsNotFound(err) {
return err
}
hugetlb, err := subsystems.Get("hugetlb")
if err != nil {
return err
}
return hugetlb.Set(path, c)
return nil
}
func joinPerfEvent(c *configs.Cgroup, pid int) error {
path, err := join(c, "perf_event", pid)
_, err := join(c, "perf_event", pid)
if err != nil && !cgroups.IsNotFound(err) {
return err
}
perfEvent, err := subsystems.Get("perf_event")
if err != nil {
return err
}
return perfEvent.Set(path, c)
return nil
}

View file

@ -5,6 +5,7 @@ package cgroups
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
@ -12,7 +13,6 @@ import (
"strings"
"time"
"github.com/docker/docker/pkg/mount"
"github.com/docker/go-units"
)
@ -84,10 +84,19 @@ func FindCgroupMountpointDir() (string, error) {
// Safe as mountinfo encodes mountpoints with spaces as \040.
index := strings.Index(text, " - ")
postSeparatorFields := strings.Fields(text[index+3:])
if len(postSeparatorFields) < 3 {
return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
numPostFields := len(postSeparatorFields)
// This is an error as we can't detect if the mount is for "cgroup"
if numPostFields == 0 {
return "", fmt.Errorf("Found no fields post '-' in %q", text)
}
if postSeparatorFields[0] == "cgroup" {
// Check that the mount is properly formatted.
if numPostFields < 3 {
return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
}
return filepath.Dir(fields[4]), nil
}
}
@ -112,11 +121,45 @@ func (m Mount) GetThisCgroupDir(cgroups map[string]string) (string, error) {
return getControllerPath(m.Subsystems[0], cgroups)
}
func getCgroupMountsHelper(ss map[string]bool, mi io.Reader) ([]Mount, error) {
res := make([]Mount, 0, len(ss))
scanner := bufio.NewScanner(mi)
for scanner.Scan() {
txt := scanner.Text()
sepIdx := strings.Index(txt, " - ")
if sepIdx == -1 {
return nil, fmt.Errorf("invalid mountinfo format")
}
if txt[sepIdx+3:sepIdx+9] != "cgroup" {
continue
}
fields := strings.Split(txt, " ")
m := Mount{
Mountpoint: fields[4],
Root: fields[3],
}
for _, opt := range strings.Split(fields[len(fields)-1], ",") {
if strings.HasPrefix(opt, cgroupNamePrefix) {
m.Subsystems = append(m.Subsystems, opt[len(cgroupNamePrefix):])
}
if ss[opt] {
m.Subsystems = append(m.Subsystems, opt)
}
}
res = append(res, m)
}
if err := scanner.Err(); err != nil {
return nil, err
}
return res, nil
}
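
As a usage sketch of the helper above (illustrative, assuming the surrounding package; the sample line is the memory entry from the Fedora mountinfo fixture used by the tests further down):

line := `28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children`
mounts, err := getCgroupMountsHelper(map[string]bool{"memory": true}, strings.NewReader(line))
// err == nil, len(mounts) == 1
// mounts[0].Root == "/", mounts[0].Mountpoint == "/sys/fs/cgroup/memory"
// mounts[0].Subsystems == []string{"memory"}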
func GetCgroupMounts() ([]Mount, error) {
mounts, err := mount.GetMounts()
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return nil, err
}
defer f.Close()
all, err := GetAllSubsystems()
if err != nil {
@ -127,24 +170,7 @@ func GetCgroupMounts() ([]Mount, error) {
for _, s := range all {
allMap[s] = true
}
res := []Mount{}
for _, mount := range mounts {
if mount.Fstype == "cgroup" {
m := Mount{Mountpoint: mount.Mountpoint, Root: mount.Root}
for _, opt := range strings.Split(mount.VfsOpts, ",") {
if strings.HasPrefix(opt, cgroupNamePrefix) {
m.Subsystems = append(m.Subsystems, opt[len(cgroupNamePrefix):])
}
if allMap[opt] {
m.Subsystems = append(m.Subsystems, opt)
}
}
res = append(res, m)
}
}
return res, nil
return getCgroupMountsHelper(allMap, f)
}
// Returns all the cgroup subsystems supported by the kernel
@ -323,9 +349,14 @@ func GetHugePageSize() ([]string, error) {
return pageSizes, nil
}
// GetPids returns all pids, that were added to cgroup at path and to all its
// subcgroups.
// GetPids returns all pids, that were added to cgroup at path.
func GetPids(path string) ([]int, error) {
return readProcsFile(path)
}
// GetAllPids returns all pids, that were added to cgroup at path and to all its
// subcgroups.
func GetAllPids(path string) ([]int, error) {
var pids []int
// collect pids from all sub-cgroups
err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error {

View file

@ -0,0 +1,138 @@
package cgroups
import (
"bytes"
"strings"
"testing"
)
const fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw
16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel
17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755
18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw
19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw
20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel
21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000
22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755
23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755
24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw
26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children
27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children
28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children
29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children
30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children
31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children
32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children
33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children
34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children
35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered
36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct
37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel
38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel
39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel
40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw
41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw
42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw
43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw
45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered
46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered
47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered
48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered
121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000
124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw
165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered
167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered
171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered
175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered
179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered
183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered
187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered
191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered
195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered
199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered
203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered
207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered
211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered
215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered
219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered
223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered
227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered
231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered
235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered
239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered
243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered
247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered
31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1`
func TestGetCgroupMounts(t *testing.T) {
subsystems := map[string]bool{
"cpuset": true,
"cpu": true,
"cpuacct": true,
"memory": true,
"devices": true,
"freezer": true,
"net_cls": true,
"blkio": true,
"perf_event": true,
"hugetlb": true,
}
mi := bytes.NewBufferString(fedoraMountinfo)
cgMounts, err := getCgroupMountsHelper(subsystems, mi)
if err != nil {
t.Fatal(err)
}
cgMap := make(map[string]Mount)
for _, m := range cgMounts {
for _, ss := range m.Subsystems {
cgMap[ss] = m
}
}
for ss := range subsystems {
m, ok := cgMap[ss]
if !ok {
t.Fatalf("%s not found", ss)
}
if m.Root != "/" {
t.Fatalf("unexpected root for %s: %s", ss, m.Root)
}
if !strings.HasPrefix(m.Mountpoint, "/sys/fs/cgroup/") && !strings.Contains(m.Mountpoint, ss) {
t.Fatalf("unexpected mountpoint for %s: %s", ss, m.Mountpoint)
}
var ssFound bool
for _, mss := range m.Subsystems {
if mss == ss {
ssFound = true
break
}
}
if !ssFound {
t.Fatalf("subsystem %s not found in Subsystems field %v", ss, m.Subsystems)
}
}
}
func BenchmarkGetCgroupMounts(b *testing.B) {
subsystems := map[string]bool{
"cpuset": true,
"cpu": true,
"cpuacct": true,
"memory": true,
"devices": true,
"freezer": true,
"net_cls": true,
"blkio": true,
"perf_event": true,
"hugetlb": true,
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
mi := bytes.NewBufferString(fedoraMountinfo)
b.StartTimer()
if _, err := getCgroupMountsHelper(subsystems, mi); err != nil {
b.Fatal(err)
}
}
}

View file

@ -11,25 +11,38 @@ const (
)
type Cgroup struct {
Name string `json:"name"`
// Deprecated, use Path instead
Name string `json:"name,omitempty"`
// name of parent cgroup or slice
Parent string `json:"parent"`
// name of parent of cgroup or slice
// Deprecated, use Path instead
Parent string `json:"parent,omitempty"`
// Path specifies the path to cgroups that are created and/or joined by the container.
// The path is assumed to be relative to the host system cgroup mountpoint.
Path string `json:"path"`
// ScopePrefix describes the prefix for the scope name
ScopePrefix string `json:"scope_prefix"`
// Paths represent the absolute cgroups paths to join.
// This takes precedence over Path.
Paths map[string]string
// Resources contains various cgroups settings to apply
Resources *Resources `json:"resources"`
*Resources
}
type Resources struct {
// If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list.
AllowAllDevices bool `json:"allow_all_devices"`
// Deprecated
AllowAllDevices bool `json:"allow_all_devices,omitempty"`
// Deprecated
AllowedDevices []*Device `json:"allowed_devices,omitempty"`
// Deprecated
DeniedDevices []*Device `json:"denied_devices,omitempty"`
AllowedDevices []*Device `json:"allowed_devices"`
DeniedDevices []*Device `json:"denied_devices"`
Devices []*Device `json:"devices"`
// Memory limit (in bytes)
Memory int64 `json:"memory"`
@ -37,7 +50,7 @@ type Resources struct {
// Memory reservation or soft_limit (in bytes)
MemoryReservation int64 `json:"memory_reservation"`
// Total memory usage (memory + swap); set `-1' to disable swap
// Total memory usage (memory + swap); set `-1` to enable unlimited swap
MemorySwap int64 `json:"memory_swap"`
// Kernel memory limit (in bytes)
@ -64,6 +77,9 @@ type Resources struct {
// MEM to use
CpusetMems string `json:"cpuset_mems"`
// Process limit; set <= `0` to disable limit.
PidsLimit int64 `json:"pids_limit"`
// Specifies per cgroup weight, range is from 10 to 1000.
BlkioWeight uint16 `json:"blkio_weight"`

View file

@ -0,0 +1,156 @@
// +build linux freebsd
package configs
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"testing"
)
// Checks whether the expected capability is specified in the capabilities.
func contains(expected string, values []string) bool {
for _, v := range values {
if v == expected {
return true
}
}
return false
}
func containsDevice(expected *Device, values []*Device) bool {
for _, d := range values {
if d.Path == expected.Path &&
d.Permissions == expected.Permissions &&
d.FileMode == expected.FileMode &&
d.Major == expected.Major &&
d.Minor == expected.Minor &&
d.Type == expected.Type {
return true
}
}
return false
}
func loadConfig(name string) (*Config, error) {
f, err := os.Open(filepath.Join("../sample_configs", name))
if err != nil {
return nil, err
}
defer f.Close()
var container *Config
if err := json.NewDecoder(f).Decode(&container); err != nil {
return nil, err
}
// Check that a config doesn't contain extra fields
var configMap, abstractMap map[string]interface{}
if _, err := f.Seek(0, 0); err != nil {
return nil, err
}
if err := json.NewDecoder(f).Decode(&abstractMap); err != nil {
return nil, err
}
configData, err := json.Marshal(&container)
if err != nil {
return nil, err
}
if err := json.Unmarshal(configData, &configMap); err != nil {
return nil, err
}
for k := range configMap {
delete(abstractMap, k)
}
if len(abstractMap) != 0 {
return nil, fmt.Errorf("unknown fields: %s", abstractMap)
}
return container, nil
}
func TestRemoveNamespace(t *testing.T) {
ns := Namespaces{
{Type: NEWNET},
}
if !ns.Remove(NEWNET) {
t.Fatal("NEWNET was not removed")
}
if len(ns) != 0 {
t.Fatalf("namespaces should have 0 items but reports %d", len(ns))
}
}
func TestHostUIDNoUSERNS(t *testing.T) {
config := &Config{
Namespaces: Namespaces{},
}
uid, err := config.HostUID()
if err != nil {
t.Fatal(err)
}
if uid != 0 {
t.Fatalf("expected uid 0 with no USERNS but received %d", uid)
}
}
func TestHostUIDWithUSERNS(t *testing.T) {
config := &Config{
Namespaces: Namespaces{{Type: NEWUSER}},
UidMappings: []IDMap{
{
ContainerID: 0,
HostID: 1000,
Size: 1,
},
},
}
uid, err := config.HostUID()
if err != nil {
t.Fatal(err)
}
if uid != 1000 {
t.Fatalf("expected uid 1000 with no USERNS but received %d", uid)
}
}
func TestHostGIDNoUSERNS(t *testing.T) {
config := &Config{
Namespaces: Namespaces{},
}
uid, err := config.HostGID()
if err != nil {
t.Fatal(err)
}
if uid != 0 {
t.Fatalf("expected gid 0 with no USERNS but received %d", uid)
}
}
func TestHostGIDWithUSERNS(t *testing.T) {
config := &Config{
Namespaces: Namespaces{{Type: NEWUSER}},
GidMappings: []IDMap{
{
ContainerID: 0,
HostID: 1000,
Size: 1,
},
},
}
uid, err := config.HostGID()
if err != nil {
t.Fatal(err)
}
if uid != 1000 {
t.Fatalf("expected gid 1000 with no USERNS but received %d", uid)
}
}

View file

@ -0,0 +1,3 @@
package configs
// All current tests are for Unix-specific functionality

View file

@ -35,6 +35,9 @@ type Device struct {
// Gid of the device.
Gid uint32 `json:"gid"`
// Write the file to the allowed list
Allow bool `json:"allow"`
}
func (d *Device) CgroupString() string {

View file

@ -82,20 +82,6 @@ var (
Minor: 1,
Permissions: "rwm",
},
{
Path: "/dev/tty0",
Type: 'c',
Major: 4,
Minor: 0,
Permissions: "rwm",
},
{
Path: "/dev/tty1",
Type: 'c',
Major: 4,
Minor: 1,
Permissions: "rwm",
},
// /dev/pts/ - pts namespaces are "coming soon"
{
Path: "",

View file

@ -6,6 +6,7 @@ package libcontainer
import (
"os"
"time"
"github.com/opencontainers/runc/libcontainer/configs"
)
@ -14,8 +15,11 @@ import (
type Status int
const (
// The container exists but has not been run yet
Created Status = iota
// The container exists and is running.
Running Status = iota + 1
Running
// The container exists, it is in the process of being paused.
Pausing
@ -32,6 +36,8 @@ const (
func (s Status) String() string {
switch s {
case Created:
return "created"
case Running:
return "running"
case Pausing:
@ -43,7 +49,7 @@ func (s Status) String() string {
case Destroyed:
return "destroyed"
default:
return "undefined"
return "unknown"
}
}
@ -56,9 +62,12 @@ type BaseState struct {
// InitProcessPid is the init process id in the parent namespace.
InitProcessPid int `json:"init_process_pid"`
// InitProcessStartTime is the init process start time.
// InitProcessStartTime is the init process start time in clock cycles since boot time.
InitProcessStartTime string `json:"init_process_start"`
// Created is the unix timestamp for the creation time of the container in UTC
Created time.Time `json:"created"`
// Config is the container's configuration.
Config configs.Config `json:"config"`
}

View file

@ -15,12 +15,14 @@ import (
"strings"
"sync"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/golang/protobuf/proto"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/criurpc"
"github.com/opencontainers/runc/libcontainer/utils"
"github.com/vishvananda/netlink/nl"
)
@ -38,6 +40,7 @@ type linuxContainer struct {
m sync.Mutex
criuVersion int
state containerState
created time.Time
}
// State represents a running container's state
@ -104,6 +107,12 @@ type Container interface {
// errors:
// Systemerror - System error.
NotifyOOM() (<-chan struct{}, error)
// NotifyMemoryPressure returns a read-only channel signaling when the container reaches a given pressure level
//
// errors:
// Systemerror - System error.
NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error)
}
// ID returns the container's unique ID
@ -129,7 +138,7 @@ func (c *linuxContainer) State() (*State, error) {
}
func (c *linuxContainer) Processes() ([]int, error) {
pids, err := c.cgroupManager.GetPids()
pids, err := c.cgroupManager.GetAllPids()
if err != nil {
return nil, newSystemError(err)
}
@ -183,29 +192,30 @@ func (c *linuxContainer) Start(process *Process) error {
}
return newSystemError(err)
}
// generate a timestamp indicating when the container was started
c.created = time.Now().UTC()
c.state = &runningState{
c: c,
}
if doInit {
if err := c.updateState(parent); err != nil {
return err
}
} else {
c.state.transition(&nullState{
c: c,
s: Running,
})
}
if c.config.Hooks != nil {
s := configs.HookState{
Version: c.config.Version,
ID: c.id,
Pid: parent.pid(),
Root: c.config.Rootfs,
}
for _, hook := range c.config.Hooks.Poststart {
if err := hook.Run(s); err != nil {
if err := parent.terminate(); err != nil {
logrus.Warn(err)
if c.config.Hooks != nil {
s := configs.HookState{
Version: c.config.Version,
ID: c.id,
Pid: parent.pid(),
Root: c.config.Rootfs,
}
for _, hook := range c.config.Hooks.Poststart {
if err := hook.Run(s); err != nil {
if err := parent.terminate(); err != nil {
logrus.Warn(err)
}
return newSystemError(err)
}
return newSystemError(err)
}
}
}
@ -258,7 +268,7 @@ func (c *linuxContainer) commandTemplate(p *Process, childPipe *os.File) (*exec.
}
func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*initProcess, error) {
t := "_LIBCONTAINER_INITTYPE=standard"
t := "_LIBCONTAINER_INITTYPE=" + string(initStandard)
cloneFlags := c.config.Namespaces.CloneFlags()
if cloneFlags&syscall.CLONE_NEWUSER != 0 {
if err := c.addUidGidMappings(cmd.SysProcAttr); err != nil {
@ -285,7 +295,7 @@ func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, parentPipe, c
}
func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, parentPipe, childPipe *os.File) (*setnsProcess, error) {
cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE=setns")
cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initSetns))
// for setns process, we don't have to set cloneflags as the process namespaces
// will only be set via setns syscall
data, err := c.bootstrapData(0, c.initProcess.pid(), p.consolePath)
@ -334,6 +344,13 @@ func (c *linuxContainer) Destroy() error {
func (c *linuxContainer) Pause() error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
if status != Running {
return newGenericError(fmt.Errorf("container not running"), ContainerNotRunning)
}
if err := c.cgroupManager.Freeze(configs.Frozen); err != nil {
return err
}
@ -345,6 +362,13 @@ func (c *linuxContainer) Pause() error {
func (c *linuxContainer) Resume() error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
if status != Paused {
return newGenericError(fmt.Errorf("container not paused"), ContainerNotPaused)
}
if err := c.cgroupManager.Freeze(configs.Thawed); err != nil {
return err
}
@ -357,6 +381,10 @@ func (c *linuxContainer) NotifyOOM() (<-chan struct{}, error) {
return notifyOnOOM(c.cgroupManager.GetPaths())
}
func (c *linuxContainer) NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) {
return notifyMemoryPressure(c.cgroupManager.GetPaths(), level)
}
// XXX debug support, remove when debugging done.
func addArgsFromEnv(evar string, args *[]string) {
if e := os.Getenv(evar); e != "" {
@ -929,9 +957,6 @@ func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Proc
func (c *linuxContainer) updateState(process parentProcess) error {
c.initProcess = process
if err := c.refreshState(); err != nil {
return err
}
state, err := c.currentState()
if err != nil {
return err
@ -945,7 +970,7 @@ func (c *linuxContainer) saveState(s *State) error {
return err
}
defer f.Close()
return json.NewEncoder(f).Encode(s)
return utils.WriteJSON(f, s)
}
func (c *linuxContainer) deleteState() error {
@ -1007,35 +1032,37 @@ func (c *linuxContainer) isPaused() (bool, error) {
}
func (c *linuxContainer) currentState() (*State, error) {
status, err := c.currentStatus()
if err != nil {
return nil, err
}
if status == Destroyed {
return nil, newGenericError(fmt.Errorf("container destroyed"), ContainerNotExists)
}
startTime, err := c.initProcess.startTime()
if err != nil {
return nil, newSystemError(err)
var (
startTime string
externalDescriptors []string
pid = -1
)
if c.initProcess != nil {
pid = c.initProcess.pid()
startTime, _ = c.initProcess.startTime()
externalDescriptors = c.initProcess.externalDescriptors()
}
state := &State{
BaseState: BaseState{
ID: c.ID(),
Config: *c.config,
InitProcessPid: c.initProcess.pid(),
InitProcessPid: pid,
InitProcessStartTime: startTime,
Created: c.created,
},
CgroupPaths: c.cgroupManager.GetPaths(),
NamespacePaths: make(map[configs.NamespaceType]string),
ExternalDescriptors: c.initProcess.externalDescriptors(),
ExternalDescriptors: externalDescriptors,
}
for _, ns := range c.config.Namespaces {
state.NamespacePaths[ns.Type] = ns.GetPath(c.initProcess.pid())
}
for _, nsType := range configs.NamespaceTypes() {
if _, ok := state.NamespacePaths[nsType]; !ok {
ns := configs.Namespace{Type: nsType}
state.NamespacePaths[ns.Type] = ns.GetPath(c.initProcess.pid())
if pid > 0 {
for _, ns := range c.config.Namespaces {
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
}
for _, nsType := range configs.NamespaceTypes() {
if _, ok := state.NamespacePaths[nsType]; !ok {
ns := configs.Namespace{Type: nsType}
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
}
}
}
return state, nil

View file

@ -0,0 +1,218 @@
// +build linux
package libcontainer
import (
"fmt"
"os"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
)
type mockCgroupManager struct {
pids []int
allPids []int
stats *cgroups.Stats
paths map[string]string
}
func (m *mockCgroupManager) GetPids() ([]int, error) {
return m.pids, nil
}
func (m *mockCgroupManager) GetAllPids() ([]int, error) {
return m.allPids, nil
}
func (m *mockCgroupManager) GetStats() (*cgroups.Stats, error) {
return m.stats, nil
}
func (m *mockCgroupManager) Apply(pid int) error {
return nil
}
func (m *mockCgroupManager) Set(container *configs.Config) error {
return nil
}
func (m *mockCgroupManager) Destroy() error {
return nil
}
func (m *mockCgroupManager) GetPaths() map[string]string {
return m.paths
}
func (m *mockCgroupManager) Freeze(state configs.FreezerState) error {
return nil
}
type mockProcess struct {
_pid int
started string
}
func (m *mockProcess) terminate() error {
return nil
}
func (m *mockProcess) pid() int {
return m._pid
}
func (m *mockProcess) startTime() (string, error) {
return m.started, nil
}
func (m *mockProcess) start() error {
return nil
}
func (m *mockProcess) wait() (*os.ProcessState, error) {
return nil, nil
}
func (m *mockProcess) signal(_ os.Signal) error {
return nil
}
func (p *mockProcess) externalDescriptors() []string {
return []string{}
}
func (p *mockProcess) setExternalDescriptors(newFds []string) {
}
func TestGetContainerPids(t *testing.T) {
container := &linuxContainer{
id: "myid",
config: &configs.Config{},
cgroupManager: &mockCgroupManager{allPids: []int{1, 2, 3}},
}
pids, err := container.Processes()
if err != nil {
t.Fatal(err)
}
for i, expected := range []int{1, 2, 3} {
if pids[i] != expected {
t.Fatalf("expected pid %d but received %d", expected, pids[i])
}
}
}
func TestGetContainerStats(t *testing.T) {
container := &linuxContainer{
id: "myid",
config: &configs.Config{},
cgroupManager: &mockCgroupManager{
pids: []int{1, 2, 3},
stats: &cgroups.Stats{
MemoryStats: cgroups.MemoryStats{
Usage: cgroups.MemoryData{
Usage: 1024,
},
},
},
},
}
stats, err := container.Stats()
if err != nil {
t.Fatal(err)
}
if stats.CgroupStats == nil {
t.Fatal("cgroup stats are nil")
}
if stats.CgroupStats.MemoryStats.Usage.Usage != 1024 {
t.Fatalf("expected memory usage 1024 but recevied %d", stats.CgroupStats.MemoryStats.Usage.Usage)
}
}
func TestGetContainerState(t *testing.T) {
var (
pid = os.Getpid()
expectedMemoryPath = "/sys/fs/cgroup/memory/myid"
expectedNetworkPath = "/networks/fd"
)
container := &linuxContainer{
id: "myid",
config: &configs.Config{
Namespaces: []configs.Namespace{
{Type: configs.NEWPID},
{Type: configs.NEWNS},
{Type: configs.NEWNET, Path: expectedNetworkPath},
{Type: configs.NEWUTS},
// emulate host for IPC
//{Type: configs.NEWIPC},
},
},
initProcess: &mockProcess{
_pid: pid,
started: "010",
},
cgroupManager: &mockCgroupManager{
pids: []int{1, 2, 3},
stats: &cgroups.Stats{
MemoryStats: cgroups.MemoryStats{
Usage: cgroups.MemoryData{
Usage: 1024,
},
},
},
paths: map[string]string{
"memory": expectedMemoryPath,
},
},
}
container.state = &createdState{c: container}
state, err := container.State()
if err != nil {
t.Fatal(err)
}
if state.InitProcessPid != pid {
t.Fatalf("expected pid %d but received %d", pid, state.InitProcessPid)
}
if state.InitProcessStartTime != "010" {
t.Fatalf("expected process start time 010 but received %s", state.InitProcessStartTime)
}
paths := state.CgroupPaths
if paths == nil {
t.Fatal("cgroup paths should not be nil")
}
if memPath := paths["memory"]; memPath != expectedMemoryPath {
t.Fatalf("expected memory path %q but received %q", expectedMemoryPath, memPath)
}
for _, ns := range container.config.Namespaces {
path := state.NamespacePaths[ns.Type]
if path == "" {
t.Fatalf("expected non nil namespace path for %s", ns.Type)
}
if ns.Type == configs.NEWNET {
if path != expectedNetworkPath {
t.Fatalf("expected path %q but received %q", expectedNetworkPath, path)
}
} else {
file := ""
switch ns.Type {
case configs.NEWNET:
file = "net"
case configs.NEWNS:
file = "mnt"
case configs.NEWPID:
file = "pid"
case configs.NEWIPC:
file = "ipc"
case configs.NEWUSER:
file = "user"
case configs.NEWUTS:
file = "uts"
}
expected := fmt.Sprintf("/proc/%d/ns/%s", pid, file)
if expected != path {
t.Fatalf("expected path %q but received %q", expected, path)
}
}
}
}

View file

@ -0,0 +1,63 @@
// +build linux freebsd
package devices
import (
"errors"
"os"
"testing"
)
func TestDeviceFromPathLstatFailure(t *testing.T) {
testError := errors.New("test error")
// Override os.Lstat to inject error.
osLstat = func(path string) (os.FileInfo, error) {
return nil, testError
}
_, err := DeviceFromPath("", "")
if err != testError {
t.Fatalf("Unexpected error %v, expected %v", err, testError)
}
}
func TestHostDevicesIoutilReadDirFailure(t *testing.T) {
testError := errors.New("test error")
// Override ioutil.ReadDir to inject error.
ioutilReadDir = func(dirname string) ([]os.FileInfo, error) {
return nil, testError
}
_, err := HostDevices()
if err != testError {
t.Fatalf("Unexpected error %v, expected %v", err, testError)
}
}
func TestHostDevicesIoutilReadDirDeepFailure(t *testing.T) {
testError := errors.New("test error")
called := false
// Override ioutil.ReadDir to inject error after the first call.
ioutilReadDir = func(dirname string) ([]os.FileInfo, error) {
if called {
return nil, testError
}
called = true
// Provoke a second call.
fi, err := os.Lstat("/tmp")
if err != nil {
t.Fatalf("Unexpected error %v", err)
}
return []os.FileInfo{fi}, nil
}
_, err := HostDevices()
if err != testError {
t.Fatalf("Unexpected error %v, expected %v", err, testError)
}
}

View file

@ -0,0 +1,102 @@
// +build linux freebsd
package devices
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"syscall"
"github.com/opencontainers/runc/libcontainer/configs"
)
var (
ErrNotADevice = errors.New("not a device node")
)
// Testing dependencies
var (
osLstat = os.Lstat
ioutilReadDir = ioutil.ReadDir
)
// Given the path to a device and its cgroup_permissions (which cannot be easily queried), look up the information about a linux device and return that information as a Device struct.
func DeviceFromPath(path, permissions string) (*configs.Device, error) {
fileInfo, err := osLstat(path)
if err != nil {
return nil, err
}
var (
devType rune
mode = fileInfo.Mode()
fileModePermissionBits = os.FileMode.Perm(mode)
)
switch {
case mode&os.ModeDevice == 0:
return nil, ErrNotADevice
case mode&os.ModeCharDevice != 0:
fileModePermissionBits |= syscall.S_IFCHR
devType = 'c'
default:
fileModePermissionBits |= syscall.S_IFBLK
devType = 'b'
}
stat_t, ok := fileInfo.Sys().(*syscall.Stat_t)
if !ok {
return nil, fmt.Errorf("cannot determine the device number for device %s", path)
}
devNumber := int(stat_t.Rdev)
return &configs.Device{
Type: devType,
Path: path,
Major: Major(devNumber),
Minor: Minor(devNumber),
Permissions: permissions,
FileMode: fileModePermissionBits,
Uid: stat_t.Uid,
Gid: stat_t.Gid,
}, nil
}
func HostDevices() ([]*configs.Device, error) {
return getDevices("/dev")
}
func getDevices(path string) ([]*configs.Device, error) {
files, err := ioutilReadDir(path)
if err != nil {
return nil, err
}
out := []*configs.Device{}
for _, f := range files {
switch {
case f.IsDir():
switch f.Name() {
case "pts", "shm", "fd", "mqueue":
continue
default:
sub, err := getDevices(filepath.Join(path, f.Name()))
if err != nil {
return nil, err
}
out = append(out, sub...)
continue
}
case f.Name() == "console":
continue
}
device, err := DeviceFromPath(filepath.Join(path, f.Name()), "rwm")
if err != nil {
if err == ErrNotADevice {
continue
}
return nil, err
}
out = append(out, device)
}
return out, nil
}
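
A hypothetical usage of the two helpers above on a Linux host:

dev, err := DeviceFromPath("/dev/null", "rwm")
// on a conventional system: err == nil, dev.Type == 'c', dev.Major == 1, dev.Minor == 3;
// a regular file yields ErrNotADevice instead
devices, err := HostDevices()
// devices lists the nodes under /dev, with pts, shm, fd, mqueue and console skipped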

View file

@ -0,0 +1,3 @@
// +build windows
package devices

View file

@ -0,0 +1,24 @@
// +build linux freebsd
package devices
/*
This code provides support for manipulating linux device numbers. It should be replaced by normal syscall functions once http://code.google.com/p/go/issues/detail?id=8106 is solved.
You can read what they are here:
- http://www.makelinux.net/ldd3/chp-3-sect-2
- http://www.linux-tutorial.info/modules.php?name=MContent&pageid=94
Note! These are NOT the same as the MAJOR(dev_t device);, MINOR(dev_t device); and MKDEV(int major, int minor); functions as defined in <linux/kdev_t.h> as the representation of device numbers used by go is different than the one used internally to the kernel! - https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L9
*/
func Major(devNumber int) int64 {
return int64((devNumber >> 8) & 0xfff)
}
func Minor(devNumber int) int64 {
return int64((devNumber & 0xff) | ((devNumber >> 12) & 0xfff00))
}
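
A worked example of the split implemented above, using the conventional numbers for /dev/null (an assumption about the host, not part of the diff):

devNumber := (1 << 8) | 3 // char device 1:3
_ = Major(devNumber)      // 1: bits 8-19 of the encoded number
_ = Minor(devNumber)      // 3: the low 8 bits, plus any high minor bits shifted down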

View file

@ -16,9 +16,10 @@ const (
ContainerPaused
ContainerNotStopped
ContainerNotRunning
ContainerNotPaused
// Process errors
ProcessNotExecuted
NoProcessOps
// Common errors
ConfigInvalid
@ -46,6 +47,10 @@ func (c ErrorCode) String() string {
return "Container is not running"
case ConsoleExists:
return "Console exists for process"
case ContainerNotPaused:
return "Container is not paused"
case NoProcessOps:
return "No process operations"
default:
return "Unknown error"
}

View file

@ -0,0 +1,20 @@
package libcontainer
import "testing"
func TestErrorCode(t *testing.T) {
codes := map[ErrorCode]string{
IdInUse: "Id already in use",
InvalidIdFormat: "Invalid format",
ContainerPaused: "Container paused",
ConfigInvalid: "Invalid configuration",
SystemError: "System error",
ContainerNotExists: "Container does not exist",
}
for code, expected := range codes {
if actual := code.String(); actual != expected {
t.Fatalf("expected string %q but received %q", expected, actual)
}
}
}

View file

@ -5,7 +5,6 @@ package libcontainer
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
@ -19,6 +18,7 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/configs/validate"
"github.com/opencontainers/runc/libcontainer/utils"
)
const (
@ -202,8 +202,12 @@ func (l *LinuxFactory) Load(id string) (Container, error) {
criuPath: l.CriuPath,
cgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths),
root: containerRoot,
created: state.Created,
}
c.state = &createdState{c: c, s: Created}
if err := c.refreshState(); err != nil {
return nil, err
}
c.state = &nullState{c: c}
return c, nil
}
@ -226,21 +230,29 @@ func (l *LinuxFactory) StartInitialization() (err error) {
// clear the current process's environment to clean any libcontainer
// specific env vars.
os.Clearenv()
var i initer
defer func() {
// if we have an error during the initialization of the container's init then send it back to the
// parent process in the form of an initError.
if err != nil {
// ensure that any data sent from the parent is consumed so it doesn't
// receive ECONNRESET when the child writes to the pipe.
ioutil.ReadAll(pipe)
if err := json.NewEncoder(pipe).Encode(newSystemError(err)); err != nil {
if _, ok := i.(*linuxStandardInit); ok {
// Synchronisation only necessary for standard init.
if err := utils.WriteJSON(pipe, syncT{procError}); err != nil {
panic(err)
}
}
if err := utils.WriteJSON(pipe, newSystemError(err)); err != nil {
panic(err)
}
} else {
if err := utils.WriteJSON(pipe, syncT{procStart}); err != nil {
panic(err)
}
}
// ensure that this pipe is always closed
pipe.Close()
}()
i, err := newContainerInit(it, pipe)
i, err = newContainerInit(it, pipe)
if err != nil {
return err
}

View file

@ -0,0 +1,183 @@
// +build linux
package libcontainer
import (
"io/ioutil"
"os"
"path/filepath"
"syscall"
"testing"
"github.com/docker/docker/pkg/mount"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/utils"
)
func newTestRoot() (string, error) {
dir, err := ioutil.TempDir("", "libcontainer")
if err != nil {
return "", err
}
return dir, nil
}
func TestFactoryNew(t *testing.T) {
root, rerr := newTestRoot()
if rerr != nil {
t.Fatal(rerr)
}
defer os.RemoveAll(root)
factory, err := New(root, Cgroupfs)
if err != nil {
t.Fatal(err)
}
if factory == nil {
t.Fatal("factory should not be nil")
}
lfactory, ok := factory.(*LinuxFactory)
if !ok {
t.Fatal("expected linux factory returned on linux based systems")
}
if lfactory.Root != root {
t.Fatalf("expected factory root to be %q but received %q", root, lfactory.Root)
}
if factory.Type() != "libcontainer" {
t.Fatalf("unexpected factory type: %q, expected %q", factory.Type(), "libcontainer")
}
}
func TestFactoryNewTmpfs(t *testing.T) {
root, rerr := newTestRoot()
if rerr != nil {
t.Fatal(rerr)
}
defer os.RemoveAll(root)
factory, err := New(root, Cgroupfs, TmpfsRoot)
if err != nil {
t.Fatal(err)
}
if factory == nil {
t.Fatal("factory should not be nil")
}
lfactory, ok := factory.(*LinuxFactory)
if !ok {
t.Fatal("expected linux factory returned on linux based systems")
}
if lfactory.Root != root {
t.Fatalf("expected factory root to be %q but received %q", root, lfactory.Root)
}
if factory.Type() != "libcontainer" {
t.Fatalf("unexpected factory type: %q, expected %q", factory.Type(), "libcontainer")
}
mounted, err := mount.Mounted(lfactory.Root)
if err != nil {
t.Fatal(err)
}
if !mounted {
t.Fatalf("Factory Root is not mounted")
}
mounts, err := mount.GetMounts()
if err != nil {
t.Fatal(err)
}
var found bool
for _, m := range mounts {
if m.Mountpoint == lfactory.Root {
if m.Fstype != "tmpfs" {
t.Fatalf("Fstype of root: %s, expected %s", m.Fstype, "tmpfs")
}
if m.Source != "tmpfs" {
t.Fatalf("Source of root: %s, expected %s", m.Source, "tmpfs")
}
found = true
}
}
if !found {
t.Fatalf("Factory Root is not listed in mounts list")
}
defer syscall.Unmount(root, syscall.MNT_DETACH)
}
func TestFactoryLoadNotExists(t *testing.T) {
root, rerr := newTestRoot()
if rerr != nil {
t.Fatal(rerr)
}
defer os.RemoveAll(root)
factory, err := New(root, Cgroupfs)
if err != nil {
t.Fatal(err)
}
_, err = factory.Load("nocontainer")
if err == nil {
t.Fatal("expected nil error loading non-existing container")
}
lerr, ok := err.(Error)
if !ok {
t.Fatal("expected libcontainer error type")
}
if lerr.Code() != ContainerNotExists {
t.Fatalf("expected error code %s but received %s", ContainerNotExists, lerr.Code())
}
}
func TestFactoryLoadContainer(t *testing.T) {
root, err := newTestRoot()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
// setup default container config and state for mocking
var (
id = "1"
expectedConfig = &configs.Config{
Rootfs: "/mycontainer/root",
}
expectedState = &State{
BaseState: BaseState{
InitProcessPid: 1024,
Config: *expectedConfig,
},
}
)
if err := os.Mkdir(filepath.Join(root, id), 0700); err != nil {
t.Fatal(err)
}
if err := marshal(filepath.Join(root, id, stateFilename), expectedState); err != nil {
t.Fatal(err)
}
factory, err := New(root, Cgroupfs)
if err != nil {
t.Fatal(err)
}
container, err := factory.Load(id)
if err != nil {
t.Fatal(err)
}
if container.ID() != id {
t.Fatalf("expected container id %q but received %q", id, container.ID())
}
config := container.Config()
if config.Rootfs != expectedConfig.Rootfs {
t.Fatalf("expected rootfs %q but received %q", expectedConfig.Rootfs, config.Rootfs)
}
lcontainer, ok := container.(*linuxContainer)
if !ok {
t.Fatal("expected linux container on linux based systems")
}
if lcontainer.initProcess.pid() != expectedState.InitProcessPid {
t.Fatalf("expected init pid %d but received %d", expectedState.InitProcessPid, lcontainer.initProcess.pid())
}
}
func marshal(path string, v interface{}) error {
f, err := os.Create(path)
if err != nil {
return err
}
defer f.Close()
return utils.WriteJSON(f, v)
}

View file

@ -9,6 +9,19 @@ import (
"github.com/opencontainers/runc/libcontainer/stacktrace"
)
type syncType uint8
const (
procReady syncType = iota
procError
procStart
procRun
)
type syncT struct {
Type syncType `json:"type"`
}
var errorTemplate = template.Must(template.New("error").Parse(`Timestamp: {{.Timestamp}}
Code: {{.ECode}}
{{if .Message }}

View file

@ -0,0 +1,14 @@
package libcontainer
import (
"fmt"
"io/ioutil"
"testing"
)
func TestErrorDetail(t *testing.T) {
err := newGenericError(fmt.Errorf("test error"), SystemError)
if derr := err.Detail(ioutil.Discard); derr != nil {
t.Fatal(derr)
}
}

View file

@ -5,6 +5,7 @@ package libcontainer
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
@ -73,6 +74,7 @@ func newContainerInit(t initType, pipe *os.File) (initer, error) {
}, nil
case initStandard:
return &linuxStandardInit{
pipe: pipe,
parentPid: syscall.Getppid(),
config: config,
}, nil
@ -140,6 +142,27 @@ func finalizeNamespace(config *initConfig) error {
return nil
}
// syncParentReady sends to the given pipe a JSON payload which indicates that
// the init is ready to Exec the child process. It then waits for the parent to
// indicate that it is cleared to Exec.
func syncParentReady(pipe io.ReadWriter) error {
// Tell parent.
if err := utils.WriteJSON(pipe, syncT{procReady}); err != nil {
return err
}
// Wait for parent to give the all-clear.
var procSync syncT
if err := json.NewDecoder(pipe).Decode(&procSync); err != nil {
if err == io.EOF {
return fmt.Errorf("parent closed synchronisation channel")
}
if procSync.Type != procRun {
return fmt.Errorf("invalid synchronisation flag from parent")
}
}
return nil
}
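// Minimal handshake sketch (hypothetical helper, not used elsewhere): over a
// net.Pipe the parent end decodes procReady and answers with procRun, which is
// what the real parent side does in initProcess.start, while the child end
// blocks in syncParentReady until that answer arrives.
func exampleSyncHandshake() error {
childEnd, parentEnd := net.Pipe()
go func() {
var msg syncT
if err := json.NewDecoder(parentEnd).Decode(&msg); err == nil && msg.Type == procReady {
// The write error is ignored here; this is only an illustration.
utils.WriteJSON(parentEnd, syncT{procRun})
}
}()
return syncParentReady(childEnd)
}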
// joinExistingNamespaces gets all the namespace paths specified for the container and
// does a setns on the namespace fd so that the current process joins the namespace.
func joinExistingNamespaces(namespaces []configs.Namespace) error {
@ -309,7 +332,7 @@ func killCgroupProcesses(m cgroups.Manager) error {
if err := m.Freeze(configs.Frozen); err != nil {
logrus.Warn(err)
}
pids, err := m.GetPids()
pids, err := m.GetAllPids()
if err != nil {
m.Freeze(configs.Thawed)
return err

View file

@ -0,0 +1,204 @@
package integration
import (
"bufio"
"bytes"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"testing"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/configs"
)
func showFile(t *testing.T, fname string) error {
t.Logf("=== %s ===\n", fname)
f, err := os.Open(fname)
if err != nil {
t.Log(err)
return err
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
t.Log(scanner.Text())
}
if err := scanner.Err(); err != nil {
return err
}
t.Logf("=== END ===\n")
return nil
}
func TestCheckpoint(t *testing.T) {
if testing.Short() {
return
}
root, err := newTestRoot()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
rootfs, err := newRootfs()
if err != nil {
t.Fatal(err)
}
defer remove(rootfs)
config := newTemplateConfig(rootfs)
config.Mounts = append(config.Mounts, &configs.Mount{
Destination: "/sys/fs/cgroup",
Device: "cgroup",
Flags: defaultMountFlags | syscall.MS_RDONLY,
})
factory, err := libcontainer.New(root, libcontainer.Cgroupfs)
if err != nil {
t.Fatal(err)
}
container, err := factory.Create("test", config)
if err != nil {
t.Fatal(err)
}
defer container.Destroy()
stdinR, stdinW, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
var stdout bytes.Buffer
pconfig := libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
Stdout: &stdout,
}
err = container.Start(&pconfig)
stdinR.Close()
defer stdinW.Close()
if err != nil {
t.Fatal(err)
}
pid, err := pconfig.Pid()
if err != nil {
t.Fatal(err)
}
process, err := os.FindProcess(pid)
if err != nil {
t.Fatal(err)
}
imagesDir, err := ioutil.TempDir("", "criu")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(imagesDir)
checkpointOpts := &libcontainer.CriuOpts{
ImagesDirectory: imagesDir,
WorkDirectory: imagesDir,
}
dumpLog := filepath.Join(checkpointOpts.WorkDirectory, "dump.log")
restoreLog := filepath.Join(checkpointOpts.WorkDirectory, "restore.log")
if err := container.Checkpoint(checkpointOpts); err != nil {
showFile(t, dumpLog)
t.Fatal(err)
}
state, err := container.Status()
if err != nil {
t.Fatal(err)
}
if state != libcontainer.Running {
t.Fatal("Unexpected state checkpoint: ", state)
}
stdinW.Close()
_, err = process.Wait()
if err != nil {
t.Fatal(err)
}
// reload the container
container, err = factory.Load("test")
if err != nil {
t.Fatal(err)
}
restoreStdinR, restoreStdinW, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
restoreProcessConfig := &libcontainer.Process{
Cwd: "/",
Stdin: restoreStdinR,
Stdout: &stdout,
}
err = container.Restore(restoreProcessConfig, checkpointOpts)
restoreStdinR.Close()
defer restoreStdinW.Close()
if err != nil {
showFile(t, restoreLog)
t.Fatal(err)
}
state, err = container.Status()
if err != nil {
t.Fatal(err)
}
if state != libcontainer.Running {
t.Fatal("Unexpected restore state: ", state)
}
pid, err = restoreProcessConfig.Pid()
if err != nil {
t.Fatal(err)
}
process, err = os.FindProcess(pid)
if err != nil {
t.Fatal(err)
}
_, err = restoreStdinW.WriteString("Hello!")
if err != nil {
t.Fatal(err)
}
restoreStdinW.Close()
s, err := process.Wait()
if err != nil {
t.Fatal(err)
}
if !s.Success() {
t.Fatal(s.String(), pid)
}
output := string(stdout.Bytes())
if !strings.Contains(output, "Hello!") {
t.Fatal("Did not restore the pipe correctly:", output)
}
}

View file

@ -0,0 +1,2 @@
// integration is used for integration testing of libcontainer
package integration

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,402 @@
package integration
import (
"bytes"
"io"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/opencontainers/runc/libcontainer"
)
func TestExecIn(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
ok(t, err)
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
ok(t, err)
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Start(process)
stdinR.Close()
defer stdinW.Close()
ok(t, err)
buffers := newStdBuffers()
ps := &libcontainer.Process{
Cwd: "/",
Args: []string{"ps"},
Env: standardEnvironment,
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Start(ps)
ok(t, err)
waitProcess(ps, t)
stdinW.Close()
waitProcess(process, t)
out := buffers.Stdout.String()
if !strings.Contains(out, "cat") || !strings.Contains(out, "ps") {
t.Fatalf("unexpected running process, output %q", out)
}
}
func TestExecInRlimit(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
ok(t, err)
defer container.Destroy()
stdinR, stdinW, err := os.Pipe()
ok(t, err)
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Start(process)
stdinR.Close()
defer stdinW.Close()
ok(t, err)
buffers := newStdBuffers()
ps := &libcontainer.Process{
Cwd: "/",
Args: []string{"/bin/sh", "-c", "ulimit -n"},
Env: standardEnvironment,
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Start(ps)
ok(t, err)
waitProcess(ps, t)
stdinW.Close()
waitProcess(process, t)
out := buffers.Stdout.String()
if limit := strings.TrimSpace(out); limit != "1025" {
t.Fatalf("expected rlimit to be 1025, got %s", limit)
}
}
func TestExecInError(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
ok(t, err)
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
ok(t, err)
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Start(process)
stdinR.Close()
defer func() {
stdinW.Close()
if _, err := process.Wait(); err != nil {
t.Log(err)
}
}()
ok(t, err)
for i := 0; i < 42; i++ {
var out bytes.Buffer
unexistent := &libcontainer.Process{
Cwd: "/",
Args: []string{"unexistent"},
Env: standardEnvironment,
Stdout: &out,
}
err = container.Start(unexistent)
if err == nil {
t.Fatal("Should be an error")
}
if !strings.Contains(err.Error(), "executable file not found") {
t.Fatalf("Should be error about not found executable, got %s", err)
}
if !bytes.Contains(out.Bytes(), []byte("executable file not found")) {
t.Fatalf("executable file not found error not delivered to stdio:\n%s", out.String())
}
}
}
func TestExecInTTY(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
ok(t, err)
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
ok(t, err)
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Start(process)
stdinR.Close()
defer stdinW.Close()
ok(t, err)
var stdout bytes.Buffer
ps := &libcontainer.Process{
Cwd: "/",
Args: []string{"ps"},
Env: standardEnvironment,
}
console, err := ps.NewConsole(0)
copy := make(chan struct{})
go func() {
io.Copy(&stdout, console)
close(copy)
}()
ok(t, err)
err = container.Start(ps)
ok(t, err)
select {
case <-time.After(5 * time.Second):
t.Fatal("Waiting for copy timed out")
case <-copy:
}
waitProcess(ps, t)
stdinW.Close()
waitProcess(process, t)
out := stdout.String()
if !strings.Contains(out, "cat") || !strings.Contains(string(out), "ps") {
t.Fatalf("unexpected running process, output %q", out)
}
}
func TestExecInEnvironment(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
ok(t, err)
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
ok(t, err)
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Start(process)
stdinR.Close()
defer stdinW.Close()
ok(t, err)
buffers := newStdBuffers()
process2 := &libcontainer.Process{
Cwd: "/",
Args: []string{"env"},
Env: []string{
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"DEBUG=true",
"DEBUG=false",
"ENV=test",
},
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Start(process2)
ok(t, err)
waitProcess(process2, t)
stdinW.Close()
waitProcess(process, t)
out := buffers.Stdout.String()
// check execin's process environment
if !strings.Contains(out, "DEBUG=false") ||
!strings.Contains(out, "ENV=test") ||
!strings.Contains(out, "HOME=/root") ||
!strings.Contains(out, "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin") ||
strings.Contains(out, "DEBUG=true") {
t.Fatalf("unexpected running process, output %q", out)
}
}
func TestExecinPassExtraFiles(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
if err != nil {
t.Fatal(err)
}
defer remove(rootfs)
config := newTemplateConfig(rootfs)
container, err := newContainer(config)
if err != nil {
t.Fatal(err)
}
defer container.Destroy()
// Execute a first process in the container
stdinR, stdinW, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Start(process)
stdinR.Close()
defer stdinW.Close()
if err != nil {
t.Fatal(err)
}
var stdout bytes.Buffer
pipeout1, pipein1, err := os.Pipe()
pipeout2, pipein2, err := os.Pipe()
inprocess := &libcontainer.Process{
Cwd: "/",
Args: []string{"sh", "-c", "cd /proc/$$/fd; echo -n *; echo -n 1 >3; echo -n 2 >4"},
Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
ExtraFiles: []*os.File{pipein1, pipein2},
Stdin: nil,
Stdout: &stdout,
}
err = container.Start(inprocess)
if err != nil {
t.Fatal(err)
}
waitProcess(inprocess, t)
stdinW.Close()
waitProcess(process, t)
out := string(stdout.Bytes())
// fd 5 is the directory handle for /proc/$$/fd
if out != "0 1 2 3 4 5" {
t.Fatalf("expected to have the file descriptors '0 1 2 3 4 5' passed to exec, got '%s'", out)
}
var buf = []byte{0}
_, err = pipeout1.Read(buf)
if err != nil {
t.Fatal(err)
}
out1 := string(buf)
if out1 != "1" {
t.Fatalf("expected first pipe to receive '1', got '%s'", out1)
}
_, err = pipeout2.Read(buf)
if err != nil {
t.Fatal(err)
}
out2 := string(buf)
if out2 != "2" {
t.Fatalf("expected second pipe to receive '2', got '%s'", out2)
}
}
func TestExecInOomScoreAdj(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
config.OomScoreAdj = 200
container, err := newContainer(config)
ok(t, err)
defer container.Destroy()
stdinR, stdinW, err := os.Pipe()
ok(t, err)
process := &libcontainer.Process{
Cwd: "/",
Args: []string{"cat"},
Env: standardEnvironment,
Stdin: stdinR,
}
err = container.Start(process)
stdinR.Close()
defer stdinW.Close()
ok(t, err)
buffers := newStdBuffers()
ps := &libcontainer.Process{
Cwd: "/",
Args: []string{"/bin/sh", "-c", "cat /proc/self/oom_score_adj"},
Env: standardEnvironment,
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Start(ps)
ok(t, err)
waitProcess(ps, t)
stdinW.Close()
waitProcess(process, t)
out := buffers.Stdout.String()
if oomScoreAdj := strings.TrimSpace(out); oomScoreAdj != strconv.Itoa(config.OomScoreAdj) {
t.Fatalf("expected oomScoreAdj to be %d, got %s", config.OomScoreAdj, oomScoreAdj)
}
}

View file

@ -0,0 +1,60 @@
package integration
import (
"os"
"runtime"
"testing"
"github.com/Sirupsen/logrus"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
_ "github.com/opencontainers/runc/libcontainer/nsenter"
)
// init runs the libcontainer initialization code in the re-exec'd ("busybox
// style") test binary to work around the Go runtime's limitations around forking.
func init() {
if len(os.Args) < 2 || os.Args[1] != "init" {
return
}
runtime.GOMAXPROCS(1)
runtime.LockOSThread()
factory, err := libcontainer.New("")
if err != nil {
logrus.Fatalf("unable to initialize for container: %s", err)
}
if err := factory.StartInitialization(); err != nil {
logrus.Fatal(err)
}
}
var (
factory libcontainer.Factory
systemdFactory libcontainer.Factory
)
func TestMain(m *testing.M) {
var (
err error
ret int = 0
)
logrus.SetOutput(os.Stderr)
logrus.SetLevel(logrus.InfoLevel)
factory, err = libcontainer.New(".", libcontainer.Cgroupfs)
if err != nil {
logrus.Error(err)
os.Exit(1)
}
if systemd.UseSystemd() {
systemdFactory, err = libcontainer.New(".", libcontainer.SystemdCgroups)
if err != nil {
logrus.Error(err)
os.Exit(1)
}
}
ret = m.Run()
os.Exit(ret)
}

View file

@ -0,0 +1,219 @@
// +build linux,cgo,seccomp
package integration
import (
"strings"
"syscall"
"testing"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/configs"
libseccomp "github.com/seccomp/libseccomp-golang"
)
func TestSeccompDenyGetcwd(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
if err != nil {
t.Fatal(err)
}
defer remove(rootfs)
config := newTemplateConfig(rootfs)
config.Seccomp = &configs.Seccomp{
DefaultAction: configs.Allow,
Syscalls: []*configs.Syscall{
{
Name: "getcwd",
Action: configs.Errno,
},
},
}
container, err := newContainer(config)
if err != nil {
t.Fatal(err)
}
defer container.Destroy()
buffers := newStdBuffers()
pwd := &libcontainer.Process{
Cwd: "/",
Args: []string{"pwd"},
Env: standardEnvironment,
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Start(pwd)
if err != nil {
t.Fatal(err)
}
ps, err := pwd.Wait()
if err == nil {
t.Fatal("Expecting error (negative return code); instead exited cleanly!")
}
var exitCode int
status := ps.Sys().(syscall.WaitStatus)
if status.Exited() {
exitCode = status.ExitStatus()
} else if status.Signaled() {
exitCode = -int(status.Signal())
} else {
t.Fatalf("Unrecognized exit reason!")
}
if exitCode == 0 {
t.Fatalf("Getcwd should fail with negative exit code, instead got %d!", exitCode)
}
expected := "pwd: getcwd: Operation not permitted"
actual := strings.Trim(buffers.Stderr.String(), "\n")
if actual != expected {
t.Fatalf("Expected output %s but got %s\n", expected, actual)
}
}
func TestSeccompPermitWriteConditional(t *testing.T) {
if testing.Short() {
return
}
rootfs, err := newRootfs()
if err != nil {
t.Fatal(err)
}
defer remove(rootfs)
config := newTemplateConfig(rootfs)
config.Seccomp = &configs.Seccomp{
DefaultAction: configs.Allow,
Syscalls: []*configs.Syscall{
{
Name: "write",
Action: configs.Errno,
Args: []*configs.Arg{
{
Index: 0,
Value: 1,
Op: configs.GreaterThan,
},
},
},
},
}
container, err := newContainer(config)
if err != nil {
t.Fatal(err)
}
defer container.Destroy()
buffers := newStdBuffers()
dmesg := &libcontainer.Process{
Cwd: "/",
Args: []string{"busybox", "ls", "/"},
Env: standardEnvironment,
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Start(dmesg)
if err != nil {
t.Fatal(err)
}
if _, err := dmesg.Wait(); err != nil {
t.Fatalf("%s: %s", err, buffers.Stderr)
}
}
func TestSeccompDenyWriteConditional(t *testing.T) {
if testing.Short() {
return
}
// Only test if library version is v2.2.1 or higher
// Conditional filtering will always error in v2.2.0 and lower
major, minor, micro := libseccomp.GetLibraryVersion()
if (major == 2 && minor < 2) || (major == 2 && minor == 2 && micro < 1) {
return
}
rootfs, err := newRootfs()
if err != nil {
t.Fatal(err)
}
defer remove(rootfs)
config := newTemplateConfig(rootfs)
config.Seccomp = &configs.Seccomp{
DefaultAction: configs.Allow,
Syscalls: []*configs.Syscall{
{
Name: "write",
Action: configs.Errno,
Args: []*configs.Arg{
{
Index: 0,
Value: 1,
Op: configs.GreaterThan,
},
},
},
},
}
container, err := newContainer(config)
if err != nil {
t.Fatal(err)
}
defer container.Destroy()
buffers := newStdBuffers()
dmesg := &libcontainer.Process{
Cwd: "/",
Args: []string{"busybox", "ls", "does_not_exist"},
Env: standardEnvironment,
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Start(dmesg)
if err != nil {
t.Fatal(err)
}
ps, err := dmesg.Wait()
if err == nil {
t.Fatal("Expecting negative return, instead got 0!")
}
var exitCode int
status := ps.Sys().(syscall.WaitStatus)
if status.Exited() {
exitCode = status.ExitStatus()
} else if status.Signaled() {
exitCode = -int(status.Signal())
} else {
t.Fatalf("Unrecognized exit reason!")
}
if exitCode == 0 {
t.Fatalf("Busybox should fail with negative exit code, instead got %d!", exitCode)
}
// We're denying write to stderr, so we expect an empty buffer
expected := ""
actual := strings.Trim(buffers.Stderr.String(), "\n")
if actual != expected {
t.Fatalf("Expected output %s but got %s\n", expected, actual)
}
}

View file

@ -0,0 +1,120 @@
package integration
import (
"syscall"
"github.com/opencontainers/runc/libcontainer/configs"
)
var standardEnvironment = []string{
"HOME=/root",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"HOSTNAME=integration",
"TERM=xterm",
}
const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
// newTemplateConfig returns a base template for running a container
//
// it uses a network strategy of just setting a loopback interface
// and the default setup for devices
func newTemplateConfig(rootfs string) *configs.Config {
return &configs.Config{
Rootfs: rootfs,
Capabilities: []string{
"CAP_CHOWN",
"CAP_DAC_OVERRIDE",
"CAP_FSETID",
"CAP_FOWNER",
"CAP_MKNOD",
"CAP_NET_RAW",
"CAP_SETGID",
"CAP_SETUID",
"CAP_SETFCAP",
"CAP_SETPCAP",
"CAP_NET_BIND_SERVICE",
"CAP_SYS_CHROOT",
"CAP_KILL",
"CAP_AUDIT_WRITE",
},
Namespaces: configs.Namespaces([]configs.Namespace{
{Type: configs.NEWNS},
{Type: configs.NEWUTS},
{Type: configs.NEWIPC},
{Type: configs.NEWPID},
{Type: configs.NEWNET},
}),
Cgroups: &configs.Cgroup{
Path: "integration/test",
Resources: &configs.Resources{
MemorySwappiness: -1,
AllowAllDevices: false,
AllowedDevices: configs.DefaultAllowedDevices,
},
},
MaskPaths: []string{
"/proc/kcore",
},
ReadonlyPaths: []string{
"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
},
Devices: configs.DefaultAutoCreatedDevices,
Hostname: "integration",
Mounts: []*configs.Mount{
{
Source: "proc",
Destination: "/proc",
Device: "proc",
Flags: defaultMountFlags,
},
{
Source: "tmpfs",
Destination: "/dev",
Device: "tmpfs",
Flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME,
Data: "mode=755",
},
{
Source: "devpts",
Destination: "/dev/pts",
Device: "devpts",
Flags: syscall.MS_NOSUID | syscall.MS_NOEXEC,
Data: "newinstance,ptmxmode=0666,mode=0620,gid=5",
},
{
Device: "tmpfs",
Source: "shm",
Destination: "/dev/shm",
Data: "mode=1777,size=65536k",
Flags: defaultMountFlags,
},
{
Source: "mqueue",
Destination: "/dev/mqueue",
Device: "mqueue",
Flags: defaultMountFlags,
},
{
Source: "sysfs",
Destination: "/sys",
Device: "sysfs",
Flags: defaultMountFlags | syscall.MS_RDONLY,
},
},
Networks: []*configs.Network{
{
Type: "loopback",
Address: "127.0.0.1/0",
Gateway: "localhost",
},
},
Rlimits: []configs.Rlimit{
{
Type: syscall.RLIMIT_NOFILE,
Hard: uint64(1025),
Soft: uint64(1025),
},
},
}
}

View file

@ -0,0 +1,141 @@
package integration
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"syscall"
"testing"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/configs"
)
func newStdBuffers() *stdBuffers {
return &stdBuffers{
Stdin: bytes.NewBuffer(nil),
Stdout: bytes.NewBuffer(nil),
Stderr: bytes.NewBuffer(nil),
}
}
type stdBuffers struct {
Stdin *bytes.Buffer
Stdout *bytes.Buffer
Stderr *bytes.Buffer
}
func (b *stdBuffers) String() string {
s := []string{}
if b.Stderr != nil {
s = append(s, b.Stderr.String())
}
if b.Stdout != nil {
s = append(s, b.Stdout.String())
}
return strings.Join(s, "|")
}
// ok fails the test if an err is not nil.
func ok(t testing.TB, err error) {
if err != nil {
_, file, line, _ := runtime.Caller(1)
t.Fatalf("%s:%d: unexpected error: %s\n\n", filepath.Base(file), line, err.Error())
}
}
func waitProcess(p *libcontainer.Process, t *testing.T) {
_, file, line, _ := runtime.Caller(1)
status, err := p.Wait()
if err != nil {
t.Fatalf("%s:%d: unexpected error: %s\n\n", filepath.Base(file), line, err.Error())
}
if !status.Success() {
t.Fatalf("%s:%d: unexpected status: %s\n\n", filepath.Base(file), line, status.String())
}
}
// newRootfs creates a new tmp directory and copies the busybox root filesystem
func newRootfs() (string, error) {
dir, err := ioutil.TempDir("", "")
if err != nil {
return "", err
}
if err := os.MkdirAll(dir, 0700); err != nil {
return "", err
}
if err := copyBusybox(dir); err != nil {
return "", err
}
return dir, nil
}
func remove(dir string) {
os.RemoveAll(dir)
}
// copyBusybox copies the rootfs for a busybox container created for the test image
// into the new directory for the specific test
func copyBusybox(dest string) error {
out, err := exec.Command("sh", "-c", fmt.Sprintf("cp -R /busybox/* %s/", dest)).CombinedOutput()
if err != nil {
return fmt.Errorf("copy error %q: %q", err, out)
}
return nil
}
func newContainer(config *configs.Config) (libcontainer.Container, error) {
f := factory
if config.Cgroups != nil && config.Cgroups.Parent == "system.slice" {
f = systemdFactory
}
return f.Create("testCT", config)
}
// runContainer runs the container with the specific config and arguments
//
// buffers are returned containing the STDOUT and STDERR output for the run
// along with the exit code and any go error
func runContainer(config *configs.Config, console string, args ...string) (buffers *stdBuffers, exitCode int, err error) {
container, err := newContainer(config)
if err != nil {
return nil, -1, err
}
defer container.Destroy()
buffers = newStdBuffers()
process := &libcontainer.Process{
Cwd: "/",
Args: args,
Env: standardEnvironment,
Stdin: buffers.Stdin,
Stdout: buffers.Stdout,
Stderr: buffers.Stderr,
}
err = container.Start(process)
if err != nil {
return buffers, -1, err
}
ps, err := process.Wait()
if err != nil {
return buffers, -1, err
}
status := ps.Sys().(syscall.WaitStatus)
if status.Exited() {
exitCode = status.ExitStatus()
} else if status.Signaled() {
exitCode = -int(status.Signal())
} else {
return buffers, -1, err
}
return
}
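// Minimal sketch of how the helpers above fit together (hypothetical test,
// not part of the suite): build the base template config for a fresh busybox
// rootfs, run a single command in it and check the captured output.
func exampleRunEcho(t *testing.T) {
rootfs, err := newRootfs()
ok(t, err)
defer remove(rootfs)
config := newTemplateConfig(rootfs)
buffers, exitCode, err := runContainer(config, "", "echo", "hello")
ok(t, err)
if exitCode != 0 || !strings.Contains(buffers.Stdout.String(), "hello") {
t.Fatalf("unexpected result: exit code %d, output %q", exitCode, buffers.Stdout.String())
}
}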

View file

@ -0,0 +1,67 @@
// +build linux
package keyctl
import (
"fmt"
"syscall"
"strings"
"strconv"
"unsafe"
)
const KEYCTL_JOIN_SESSION_KEYRING = 1
const KEYCTL_SETPERM = 5
const KEYCTL_DESCRIBE = 6
type KeySerial uint32
func JoinSessionKeyring(name string) (KeySerial, error) {
var _name *byte = nil
var err error
if len(name) > 0 {
_name, err = syscall.BytePtrFromString(name)
if err != nil {
return KeySerial(0), err
}
}
sessKeyId, _, errn := syscall.Syscall(syscall.SYS_KEYCTL, KEYCTL_JOIN_SESSION_KEYRING, uintptr(unsafe.Pointer(_name)), 0)
if errn != 0 {
return 0, fmt.Errorf("could not create session key: %v", errn)
}
return KeySerial(sessKeyId), nil
}
// ModKeyringPerm modifies the permissions of a keyring: it reads the current
// permissions, ANDs them with the given mask (clearing permission bits) and
// then ORs in the additional bits given in setbits.
func ModKeyringPerm(ringId KeySerial, mask, setbits uint32) error {
dest := make([]byte, 1024)
destBytes := unsafe.Pointer(&dest[0])
if _, _, err := syscall.Syscall6(syscall.SYS_KEYCTL, uintptr(KEYCTL_DESCRIBE), uintptr(ringId), uintptr(destBytes), uintptr(len(dest)), 0, 0); err != 0 {
return err
}
res := strings.Split(string(dest), ";")
if len(res) < 5 {
return fmt.Errorf("Destination buffer for key description is too small")
}
// parse permissions
perm64, err := strconv.ParseUint(res[3], 16, 32)
if err != nil {
return err
}
perm := (uint32(perm64) & mask) | setbits
if _, _, err := syscall.Syscall(syscall.SYS_KEYCTL, uintptr(KEYCTL_SETPERM), uintptr(ringId), uintptr(perm)); err != 0 {
return err
}
return nil
}
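// Minimal usage sketch (hypothetical keyring name and permission values, not
// taken from runc): join a named session keyring, then clear the group/other
// permission bits while keeping the possessor/user bits and adding nothing.
func exampleSessionKeyring() error {
ring, err := JoinSessionKeyring("example-session")
if err != nil {
return err
}
return ModKeyringPerm(ring, 0xffff0000, 0)
}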

View file

@ -0,0 +1,144 @@
// +build selinux,linux
package label
import (
"os"
"strings"
"testing"
"github.com/opencontainers/runc/libcontainer/selinux"
)
func TestInit(t *testing.T) {
if selinux.SelinuxEnabled() {
var testNull []string
plabel, mlabel, err := InitLabels(testNull)
if err != nil {
t.Log("InitLabels Failed")
t.Fatal(err)
}
testDisabled := []string{"disable"}
plabel, mlabel, err = InitLabels(testDisabled)
if err != nil {
t.Log("InitLabels Disabled Failed")
t.Fatal(err)
}
if plabel != "" {
t.Log("InitLabels Disabled Failed")
t.Fatal()
}
testUser := []string{"user:user_u", "role:user_r", "type:user_t", "level:s0:c1,c15"}
plabel, mlabel, err = InitLabels(testUser)
if err != nil {
t.Log("InitLabels User Failed")
t.Fatal(err)
}
if plabel != "user_u:user_r:user_t:s0:c1,c15" || mlabel != "user_u:object_r:svirt_sandbox_file_t:s0:c1,c15" {
t.Log("InitLabels User Match Failed")
t.Log(plabel, mlabel)
t.Fatal(err)
}
testBadData := []string{"user", "role:user_r", "type:user_t", "level:s0:c1,c15"}
plabel, mlabel, err = InitLabels(testBadData)
if err == nil {
t.Log("InitLabels Bad Failed")
t.Fatal(err)
}
}
}
func TestDuplicateLabel(t *testing.T) {
secopt := DupSecOpt("system_u:system_r:svirt_lxc_net_t:s0:c1,c2")
t.Log(secopt)
for _, opt := range secopt {
con := strings.SplitN(opt, ":", 3)
if len(con) != 3 || con[0] != "label" {
t.Errorf("Invalid DupSecOpt return value")
continue
}
if con[1] == "user" {
if con[2] != "system_u" {
t.Errorf("DupSecOpt Failed user incorrect")
}
continue
}
if con[1] == "role" {
if con[2] != "system_r" {
t.Errorf("DupSecOpt Failed role incorrect")
}
continue
}
if con[1] == "type" {
if con[2] != "svirt_lxc_net_t" {
t.Errorf("DupSecOpt Failed type incorrect")
}
continue
}
if con[1] == "level" {
if con[2] != "s0:c1,c2" {
t.Errorf("DupSecOpt Failed level incorrect")
}
continue
}
t.Errorf("DupSecOpt Failed invalid field %q", con[1])
}
secopt = DisableSecOpt()
if secopt[0] != "label:disable" {
t.Errorf("DisableSecOpt Failed level incorrect")
}
}
func TestRelabel(t *testing.T) {
testdir := "/tmp/test"
if err := os.Mkdir(testdir, 0755); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(testdir)
label := "system_u:system_r:svirt_sandbox_file_t:s0:c1,c2"
if err := Relabel(testdir, "", true); err != nil {
t.Fatal("Relabel with no label failed: %v", err)
}
if err := Relabel(testdir, label, true); err != nil {
t.Fatal("Relabel shared failed: %v", err)
}
if err := Relabel(testdir, label, false); err != nil {
t.Fatal("Relabel unshared failed: %v", err)
}
if err := Relabel("/etc", label, false); err == nil {
t.Fatal("Relabel /etc succeeded")
}
if err := Relabel("/", label, false); err == nil {
t.Fatal("Relabel / succeeded")
}
if err := Relabel("/usr", label, false); err == nil {
t.Fatal("Relabel /usr succeeded")
}
}
func TestValidate(t *testing.T) {
if err := Validate("zZ"); err != ErrIncompatibleLabel {
t.Fatalf("Expected incompatible error, got %v", err)
}
if err := Validate("Z"); err != nil {
t.Fatal(err)
}
if err := Validate("z"); err != nil {
t.Fatal(err)
}
if err := Validate(""); err != nil {
t.Fatal(err)
}
}
func TestIsShared(t *testing.T) {
if shared := IsShared("Z"); shared {
t.Fatal("Expected label `Z` to not be shared, got %v", shared)
}
if shared := IsShared("z"); !shared {
t.Fatal("Expected label `z` to be shared, got %v", shared)
}
if shared := IsShared("Zz"); !shared {
t.Fatal("Expected label `Zz` to be shared, got %v", shared)
}
}

View file

@ -12,31 +12,32 @@ import (
const oomCgroupName = "memory"
// notifyOnOOM returns channel on which you can expect event about OOM,
// if process died without OOM this channel will be closed.
// s is current *libcontainer.State for container.
func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) {
dir := paths[oomCgroupName]
if dir == "" {
return nil, fmt.Errorf("There is no path for %q in state", oomCgroupName)
}
oomControl, err := os.Open(filepath.Join(dir, "memory.oom_control"))
type PressureLevel uint
const (
LowPressure PressureLevel = iota
MediumPressure
CriticalPressure
)
func registerMemoryEvent(cgDir string, evName string, arg string) (<-chan struct{}, error) {
evFile, err := os.Open(filepath.Join(cgDir, evName))
if err != nil {
return nil, err
}
fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0)
if syserr != 0 {
oomControl.Close()
evFile.Close()
return nil, syserr
}
eventfd := os.NewFile(fd, "eventfd")
eventControlPath := filepath.Join(dir, "cgroup.event_control")
data := fmt.Sprintf("%d %d", eventfd.Fd(), oomControl.Fd())
eventControlPath := filepath.Join(cgDir, "cgroup.event_control")
data := fmt.Sprintf("%d %d %s", eventfd.Fd(), evFile.Fd(), arg)
if err := ioutil.WriteFile(eventControlPath, []byte(data), 0700); err != nil {
eventfd.Close()
oomControl.Close()
evFile.Close()
return nil, err
}
ch := make(chan struct{})
@ -44,7 +45,7 @@ func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) {
defer func() {
close(ch)
eventfd.Close()
oomControl.Close()
evFile.Close()
}()
buf := make([]byte, 8)
for {
@ -61,3 +62,28 @@ func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) {
}()
return ch, nil
}
// notifyOnOOM returns a channel that delivers an event when the container
// triggers an OOM; if the process dies without an OOM the channel is closed.
func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) {
dir := paths[oomCgroupName]
if dir == "" {
return nil, fmt.Errorf("path %q missing", oomCgroupName)
}
return registerMemoryEvent(dir, "memory.oom_control", "")
}
func notifyMemoryPressure(paths map[string]string, level PressureLevel) (<-chan struct{}, error) {
dir := paths[oomCgroupName]
if dir == "" {
return nil, fmt.Errorf("path %q missing", oomCgroupName)
}
if level > CriticalPressure {
return nil, fmt.Errorf("invalid pressure level %d", level)
}
levelStr := []string{"low", "medium", "critical"}[level]
return registerMemoryEvent(dir, "memory.pressure_level", levelStr)
}
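// Minimal usage sketch (hypothetical caller, assuming it already holds the
// container's cgroup paths): register for OOM and critical memory-pressure
// events and report whichever fires first.
func exampleMemoryEvents(paths map[string]string) error {
oom, err := notifyOnOOM(paths)
if err != nil {
return err
}
pressure, err := notifyMemoryPressure(paths, CriticalPressure)
if err != nil {
return err
}
select {
case <-oom:
fmt.Println("container hit its memory limit")
case <-pressure:
fmt.Println("container is under critical memory pressure")
}
return nil
}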

View file

@ -0,0 +1,128 @@
// +build linux
package libcontainer
import (
"encoding/binary"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"syscall"
"testing"
"time"
)
type notifyFunc func(paths map[string]string) (<-chan struct{}, error)
func testMemoryNotification(t *testing.T, evName string, notify notifyFunc, targ string) {
memoryPath, err := ioutil.TempDir("", "testmemnotification-"+evName)
if err != nil {
t.Fatal(err)
}
evFile := filepath.Join(memoryPath, evName)
eventPath := filepath.Join(memoryPath, "cgroup.event_control")
if err := ioutil.WriteFile(evFile, []byte{}, 0700); err != nil {
t.Fatal(err)
}
if err := ioutil.WriteFile(eventPath, []byte{}, 0700); err != nil {
t.Fatal(err)
}
paths := map[string]string{
"memory": memoryPath,
}
ch, err := notify(paths)
if err != nil {
t.Fatal("expected no error, got:", err)
}
data, err := ioutil.ReadFile(eventPath)
if err != nil {
t.Fatal("couldn't read event control file:", err)
}
var eventFd, evFd int
var arg string
if targ != "" {
_, err = fmt.Sscanf(string(data), "%d %d %s", &eventFd, &evFd, &arg)
} else {
_, err = fmt.Sscanf(string(data), "%d %d", &eventFd, &evFd)
}
if err != nil || arg != targ {
t.Fatalf("invalid control data %q: %s", data, err)
}
// re-open the eventfd
efd, err := syscall.Dup(eventFd)
if err != nil {
t.Fatal("unable to reopen eventfd:", err)
}
defer syscall.Close(efd)
if err != nil {
t.Fatal("unable to dup event fd:", err)
}
buf := make([]byte, 8)
binary.LittleEndian.PutUint64(buf, 1)
if _, err := syscall.Write(efd, buf); err != nil {
t.Fatal("unable to write to eventfd:", err)
}
select {
case <-ch:
case <-time.After(100 * time.Millisecond):
t.Fatal("no notification on channel after 100ms")
}
// simulate what happens when a cgroup is destroyed by cleaning up and then
// writing to the eventfd.
if err := os.RemoveAll(memoryPath); err != nil {
t.Fatal(err)
}
if _, err := syscall.Write(efd, buf); err != nil {
t.Fatal("unable to write to eventfd:", err)
}
// give things a moment to shut down
select {
case _, ok := <-ch:
if ok {
t.Fatal("expected no notification to be triggered")
}
case <-time.After(100 * time.Millisecond):
}
if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(evFd), syscall.F_GETFD, 0); err != syscall.EBADF {
t.Error("expected event control to be closed")
}
if _, _, err := syscall.Syscall(syscall.SYS_FCNTL, uintptr(eventFd), syscall.F_GETFD, 0); err != syscall.EBADF {
t.Error("expected event fd to be closed")
}
}
func TestNotifyOnOOM(t *testing.T) {
f := func(paths map[string]string) (<-chan struct{}, error) {
return notifyOnOOM(paths)
}
testMemoryNotification(t, "memory.oom_control", f, "")
}
func TestNotifyMemoryPressure(t *testing.T) {
tests := map[PressureLevel]string{
LowPressure: "low",
MediumPressure: "medium",
CriticalPressure: "critical",
}
for level, arg := range tests {
f := func(paths map[string]string) (<-chan struct{}, error) {
return notifyMemoryPressure(paths, level)
}
testMemoryNotification(t, "memory.pressure_level", f, arg)
}
}

View file

@ -0,0 +1,143 @@
package nsenter
import (
"bytes"
"encoding/json"
"io"
"os"
"os/exec"
"strings"
"syscall"
"testing"
"github.com/opencontainers/runc/libcontainer"
"github.com/vishvananda/netlink/nl"
)
type pid struct {
Pid int `json:"Pid"`
}
func TestNsenterAlivePid(t *testing.T) {
args := []string{"nsenter-exec"}
parent, child, err := newPipe()
if err != nil {
t.Fatalf("failed to create pipe %v", err)
}
cmd := &exec.Cmd{
Path: os.Args[0],
Args: args,
ExtraFiles: []*os.File{child},
Env: []string{"_LIBCONTAINER_INITTYPE=setns", "_LIBCONTAINER_INITPIPE=3"},
}
if err := cmd.Start(); err != nil {
t.Fatalf("nsenter failed to start %v", err)
}
r := nl.NewNetlinkRequest(int(libcontainer.InitMsg), 0)
r.AddData(&libcontainer.Int32msg{
Type: libcontainer.PidAttr,
Value: uint32(os.Getpid()),
})
if _, err := io.Copy(parent, bytes.NewReader(r.Serialize())); err != nil {
t.Fatal(err)
}
decoder := json.NewDecoder(parent)
var pid *pid
if err := decoder.Decode(&pid); err != nil {
t.Fatalf("%v", err)
}
if err := cmd.Wait(); err != nil {
t.Fatalf("nsenter exits with a non-zero exit status")
}
p, err := os.FindProcess(pid.Pid)
if err != nil {
t.Fatalf("%v", err)
}
p.Wait()
}
func TestNsenterInvalidPid(t *testing.T) {
args := []string{"nsenter-exec"}
parent, child, err := newPipe()
if err != nil {
t.Fatalf("failed to create pipe %v", err)
}
cmd := &exec.Cmd{
Path: os.Args[0],
Args: args,
ExtraFiles: []*os.File{child},
Env: []string{"_LIBCONTAINER_INITTYPE=setns", "_LIBCONTAINER_INITPIPE=3"},
}
if err := cmd.Start(); err != nil {
t.Fatal("nsenter exits with a zero exit status")
}
r := nl.NewNetlinkRequest(int(libcontainer.InitMsg), 0)
r.AddData(&libcontainer.Int32msg{
Type: libcontainer.PidAttr,
Value: 0,
})
if _, err := io.Copy(parent, bytes.NewReader(r.Serialize())); err != nil {
t.Fatal(err)
}
if err := cmd.Wait(); err == nil {
t.Fatal("nsenter exits with a zero exit status")
}
}
func TestNsenterDeadPid(t *testing.T) {
deadCmd := exec.Command("true")
if err := deadCmd.Run(); err != nil {
t.Fatal(err)
}
args := []string{"nsenter-exec"}
parent, child, err := newPipe()
if err != nil {
t.Fatalf("failed to create pipe %v", err)
}
cmd := &exec.Cmd{
Path: os.Args[0],
Args: args,
ExtraFiles: []*os.File{child},
Env: []string{"_LIBCONTAINER_INITTYPE=setns", "_LIBCONTAINER_INITPIPE=3"},
}
if err := cmd.Start(); err != nil {
t.Fatal("nsenter exits with a zero exit status")
}
r := nl.NewNetlinkRequest(int(libcontainer.InitMsg), 0)
r.AddData(&libcontainer.Int32msg{
Type: libcontainer.PidAttr,
Value: uint32(deadCmd.Process.Pid),
})
if _, err := io.Copy(parent, bytes.NewReader(r.Serialize())); err != nil {
t.Fatal(err)
}
if err := cmd.Wait(); err == nil {
t.Fatal("nsenter exits with a zero exit status")
}
}
func init() {
if strings.HasPrefix(os.Args[0], "nsenter-") {
os.Exit(0)
}
return
}
func newPipe() (parent *os.File, child *os.File, err error) {
fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)
if err != nil {
return nil, nil, err
}
return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil
}

View file

@ -17,6 +17,7 @@
#include <sched.h>
#include <signal.h>
#include <bits/sockaddr.h>
#include <linux/netlink.h>
#include <linux/types.h>
#include <stdint.h>

View file

@ -55,7 +55,7 @@ type Process struct {
// Wait releases any resources associated with the Process
func (p Process) Wait() (*os.ProcessState, error) {
if p.ops == nil {
return nil, newGenericError(fmt.Errorf("invalid process"), ProcessNotExecuted)
return nil, newGenericError(fmt.Errorf("invalid process"), NoProcessOps)
}
return p.ops.wait()
}
@ -65,7 +65,7 @@ func (p Process) Pid() (int, error) {
// math.MinInt32 is returned here, because it's invalid value
// for the kill() system call.
if p.ops == nil {
return math.MinInt32, newGenericError(fmt.Errorf("invalid process"), ProcessNotExecuted)
return math.MinInt32, newGenericError(fmt.Errorf("invalid process"), NoProcessOps)
}
return p.ops.pid(), nil
}
@ -73,7 +73,7 @@ func (p Process) Pid() (int, error) {
// Signal sends a signal to the Process.
func (p Process) Signal(sig os.Signal) error {
if p.ops == nil {
return newGenericError(fmt.Errorf("invalid process"), ProcessNotExecuted)
return newGenericError(fmt.Errorf("invalid process"), NoProcessOps)
}
return p.ops.signal(sig)
}

View file

@ -5,6 +5,7 @@ package libcontainer
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
@ -15,6 +16,7 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/runc/libcontainer/utils"
)
type parentProcess interface {
@ -83,9 +85,10 @@ func (p *setnsProcess) start() (err error) {
return newSystemError(err)
}
}
if err := json.NewEncoder(p.parentPipe).Encode(p.config); err != nil {
if err := utils.WriteJSON(p.parentPipe, p.config); err != nil {
return newSystemError(err)
}
if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil {
return newSystemError(err)
}
@ -95,6 +98,7 @@ func (p *setnsProcess) start() (err error) {
if err := json.NewDecoder(p.parentPipe).Decode(&ierr); err != nil && err != io.EOF {
return newSystemError(err)
}
// Must be done after Shutdown so the child will exit and we can wait for it.
if ierr != nil {
p.wait()
return newSystemError(ierr)
@ -198,7 +202,6 @@ func (p *initProcess) start() (err error) {
return newSystemError(err)
}
p.setExternalDescriptors(fds)
// Do this before syncing with child so that no children
// can escape the cgroup
if err := p.manager.Apply(p.pid()); err != nil {
@ -229,13 +232,56 @@ func (p *initProcess) start() (err error) {
if err := p.sendConfig(); err != nil {
return newSystemError(err)
}
// wait for the child process to fully complete and receive an error message
// if one was encoutered
var ierr *genericError
if err := json.NewDecoder(p.parentPipe).Decode(&ierr); err != nil && err != io.EOF {
var (
procSync syncT
sentRun bool
ierr *genericError
)
loop:
for {
if err := json.NewDecoder(p.parentPipe).Decode(&procSync); err != nil {
if err == io.EOF {
break loop
}
return newSystemError(err)
}
switch procSync.Type {
case procStart:
break loop
case procReady:
if err := p.manager.Set(p.config.Config); err != nil {
return newSystemError(err)
}
// Sync with child.
if err := utils.WriteJSON(p.parentPipe, syncT{procRun}); err != nil {
return newSystemError(err)
}
sentRun = true
case procError:
// wait for the child process to fully complete and receive an error message
// if one was encountered
if err := json.NewDecoder(p.parentPipe).Decode(&ierr); err != nil && err != io.EOF {
return newSystemError(err)
}
if ierr != nil {
break loop
}
// Programmer error.
panic("No error following JSON procError payload.")
default:
return newSystemError(fmt.Errorf("invalid JSON synchronisation payload from child"))
}
}
if !sentRun {
return newSystemError(fmt.Errorf("could not synchronise with container process"))
}
if err := syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR); err != nil {
return newSystemError(err)
}
// Must be done after Shutdown so the child will exit and we can wait for it.
if ierr != nil {
p.wait()
return newSystemError(ierr)
}
return nil
@ -269,12 +315,10 @@ func (p *initProcess) startTime() (string, error) {
}
func (p *initProcess) sendConfig() error {
// send the state to the container's init process then shutdown writes for the parent
if err := json.NewEncoder(p.parentPipe).Encode(p.config); err != nil {
return err
}
// shutdown writes for the parent side of the pipe
return syscall.Shutdown(int(p.parentPipe.Fd()), syscall.SHUT_WR)
// send the config to the container's init process, we don't use JSON Encode
// here because there might be a problem in JSON decoder in some cases, see:
// https://github.com/docker/docker/issues/14203#issuecomment-174177790
return utils.WriteJSON(p.parentPipe, p.config)
}
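// sendConfig depends on utils.WriteJSON sending the whole payload in a single
// Write rather than streaming it through an Encoder. A minimal sketch of such
// a helper (the real implementation lives in libcontainer/utils and may
// differ in detail):
func writeJSONOnce(w io.Writer, v interface{}) error {
data, err := json.Marshal(v)
if err != nil {
return err
}
_, err = w.Write(data)
return err
}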
func (p *initProcess) createNetworkInterfaces() error {

View file

@ -18,6 +18,8 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/label"
"github.com/opencontainers/runc/libcontainer/system"
libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
)
const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
@ -293,12 +295,30 @@ func getCgroupMounts(m *configs.Mount) ([]*configs.Mount, error) {
// checkMountDestination checks to ensure that the mount destination is not over the top of /proc.
// dest is required to be an abs path and have any symlinks resolved before calling this function.
func checkMountDestination(rootfs, dest string) error {
if filepath.Clean(rootfs) == filepath.Clean(dest) {
if libcontainerUtils.CleanPath(rootfs) == libcontainerUtils.CleanPath(dest) {
return fmt.Errorf("mounting into / is prohibited")
}
invalidDestinations := []string{
"/proc",
}
// Whitelist of destinations that are allowed even though they are subdirectories of the invalid destinations above
validDestinations := []string{
// These entries can be bind mounted from files emulated by FUSE,
// so that commands like top and free display stats inside the container.
"/proc/cpuinfo",
"/proc/diskstats",
"/proc/meminfo",
"/proc/stats",
}
for _, valid := range validDestinations {
path, err := filepath.Rel(filepath.Join(rootfs, valid), dest)
if err != nil {
return err
}
if path == "." {
return nil
}
}
for _, invalid := range invalidDestinations {
path, err := filepath.Rel(filepath.Join(rootfs, invalid), dest)
if err != nil {
@ -365,11 +385,12 @@ func reOpenDevNull() error {
// Create the device nodes in the container.
func createDevices(config *configs.Config) error {
useBindMount := system.RunningInUserNS() || config.Namespaces.Contains(configs.NEWUSER)
oldMask := syscall.Umask(0000)
for _, node := range config.Devices {
// containers running in a user namespace are not allowed to mknod
// devices so we can just bind mount it from the host.
if err := createDeviceNode(config.Rootfs, node, config.Namespaces.Contains(configs.NEWUSER)); err != nil {
if err := createDeviceNode(config.Rootfs, node, useBindMount); err != nil {
syscall.Umask(oldMask)
return err
}

View file

@ -0,0 +1,37 @@
// +build linux
package libcontainer
import "testing"
func TestCheckMountDestOnProc(t *testing.T) {
dest := "/rootfs/proc/"
err := checkMountDestination("/rootfs", dest)
if err == nil {
t.Fatal("destination inside proc should return an error")
}
}
func TestCheckMountDestInSys(t *testing.T) {
dest := "/rootfs//sys/fs/cgroup"
err := checkMountDestination("/rootfs", dest)
if err != nil {
t.Fatal("destination inside /sys should not return an error")
}
}
func TestCheckMountDestFalsePositive(t *testing.T) {
dest := "/rootfs/sysfiles/fs/cgroup"
err := checkMountDestination("/rootfs", dest)
if err != nil {
t.Fatal(err)
}
}
func TestCheckMountRoot(t *testing.T) {
dest := "/rootfs"
err := checkMountDestination("/rootfs", dest)
if err == nil {
t.Fatal(err)
}
}

View file

@ -0,0 +1,47 @@
Name: cat
State: R (running)
Tgid: 19383
Ngid: 0
Pid: 19383
PPid: 19275
TracerPid: 0
Uid: 1000 1000 1000 1000
Gid: 1000 1000 1000 1000
FDSize: 256
Groups: 24 25 27 29 30 44 46 102 104 108 111 1000 1001
NStgid: 19383
NSpid: 19383
NSpgid: 19383
NSsid: 19275
VmPeak: 5944 kB
VmSize: 5944 kB
VmLck: 0 kB
VmPin: 0 kB
VmHWM: 744 kB
VmRSS: 744 kB
VmData: 324 kB
VmStk: 136 kB
VmExe: 48 kB
VmLib: 1776 kB
VmPTE: 32 kB
VmPMD: 12 kB
VmSwap: 0 kB
Threads: 1
SigQ: 0/30067
SigPnd: 0000000000000000
ShdPnd: 0000000000000000
SigBlk: 0000000000000000
SigIgn: 0000000000000080
SigCgt: 0000000000000000
CapInh: 0000000000000000
CapPrm: 0000000000000000
CapEff: 0000000000000000
CapBnd: 0000003fffffffff
CapAmb: 0000000000000000
Seccomp: 0
Cpus_allowed: f
Cpus_allowed_list: 0-3
Mems_allowed: 00000000,00000001
Mems_allowed_list: 0
voluntary_ctxt_switches: 0
nonvoluntary_ctxt_switches: 1

View file

@ -3,8 +3,11 @@
package seccomp
import (
"bufio"
"fmt"
"log"
"os"
"strings"
"syscall"
"github.com/opencontainers/runc/libcontainer/configs"
@ -17,6 +20,9 @@ var (
actKill = libseccomp.ActKill
actTrace = libseccomp.ActTrace.SetReturnCode(int16(syscall.EPERM))
actErrno = libseccomp.ActErrno.SetReturnCode(int16(syscall.EPERM))
// SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER.
SeccompModeFilter = uintptr(2)
)
// Filters given syscalls in a container, preventing them from being used
@ -73,6 +79,24 @@ func InitSeccomp(config *configs.Seccomp) error {
return nil
}
// IsEnabled returns if the kernel has been configured to support seccomp.
func IsEnabled() bool {
// Try to read from /proc/self/status for kernels > 3.8
s, err := parseStatusFile("/proc/self/status")
if err != nil {
// Check if Seccomp is supported, via CONFIG_SECCOMP.
if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL {
// Make sure the kernel has CONFIG_SECCOMP_FILTER.
if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL {
return true
}
}
return false
}
_, ok := s["Seccomp"]
return ok
}
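// Minimal usage sketch (hypothetical caller): probe kernel support before
// installing a profile, returning an error when seccomp is unavailable.
func exampleApplySeccomp(profile *configs.Seccomp) error {
if !IsEnabled() {
return fmt.Errorf("seccomp is not supported by this kernel")
}
return InitSeccomp(profile)
}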
// Convert Libcontainer Action to Libseccomp ScmpAction
func getAction(act configs.Action) (libseccomp.ScmpAction, error) {
switch act {
@ -178,3 +202,30 @@ func matchCall(filter *libseccomp.ScmpFilter, call *configs.Syscall) error {
return nil
}
func parseStatusFile(path string) (map[string]string, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
s := bufio.NewScanner(f)
status := make(map[string]string)
for s.Scan() {
if err := s.Err(); err != nil {
return nil, err
}
text := s.Text()
parts := strings.Split(text, ":")
if len(parts) <= 1 {
continue
}
status[parts[0]] = parts[1]
}
return status, nil
}

View file

@ -0,0 +1,17 @@
// +build linux,cgo,seccomp
package seccomp
import "testing"
func TestParseStatusFile(t *testing.T) {
s, err := parseStatusFile("fixtures/proc_self_status")
if err != nil {
t.Fatal(err)
}
if _, ok := s["Seccomp"]; !ok {
t.Fatal("expected to find 'Seccomp' in the map but did not.")
}
}

View file

@ -17,3 +17,8 @@ func InitSeccomp(config *configs.Seccomp) error {
}
return nil
}
// IsEnabled returns false, because it is not supported.
func IsEnabled() bool {
return false
}

View file

@ -0,0 +1,477 @@
// +build linux
package selinux
import (
"bufio"
"crypto/rand"
"encoding/binary"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"github.com/docker/docker/pkg/mount"
"github.com/opencontainers/runc/libcontainer/system"
)
const (
Enforcing = 1
Permissive = 0
Disabled = -1
selinuxDir = "/etc/selinux/"
selinuxConfig = selinuxDir + "config"
selinuxTypeTag = "SELINUXTYPE"
selinuxTag = "SELINUX"
selinuxPath = "/sys/fs/selinux"
xattrNameSelinux = "security.selinux"
stRdOnly = 0x01
)
var (
assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
mcsList = make(map[string]bool)
selinuxfs = "unknown"
selinuxEnabled = false // Stores whether selinux is currently enabled
selinuxEnabledChecked = false // Stores whether selinux enablement has been checked or established yet
)
type SELinuxContext map[string]string
// SetDisabled disables selinux support for the package
func SetDisabled() {
selinuxEnabled, selinuxEnabledChecked = false, true
}
// getSelinuxMountPoint returns the path to the mountpoint of an selinuxfs
// filesystem or an empty string if no mountpoint is found. Selinuxfs is
// a proc-like pseudo-filesystem that exposes the selinux policy API to
// processes. The existence of an selinuxfs mount is used to determine
// whether selinux is currently enabled or not.
func getSelinuxMountPoint() string {
if selinuxfs != "unknown" {
return selinuxfs
}
selinuxfs = ""
mounts, err := mount.GetMounts()
if err != nil {
return selinuxfs
}
for _, mount := range mounts {
if mount.Fstype == "selinuxfs" {
selinuxfs = mount.Mountpoint
break
}
}
if selinuxfs != "" {
var buf syscall.Statfs_t
syscall.Statfs(selinuxfs, &buf)
if (buf.Flags & stRdOnly) == 1 {
selinuxfs = ""
}
}
return selinuxfs
}
// SelinuxEnabled returns whether selinux is currently enabled.
func SelinuxEnabled() bool {
if selinuxEnabledChecked {
return selinuxEnabled
}
selinuxEnabledChecked = true
if fs := getSelinuxMountPoint(); fs != "" {
if con, _ := Getcon(); con != "kernel" {
selinuxEnabled = true
}
}
return selinuxEnabled
}
func readConfig(target string) (value string) {
var (
val, key string
bufin *bufio.Reader
)
in, err := os.Open(selinuxConfig)
if err != nil {
return ""
}
defer in.Close()
bufin = bufio.NewReader(in)
for done := false; !done; {
var line string
if line, err = bufin.ReadString('\n'); err != nil {
if err != io.EOF {
return ""
}
done = true
}
line = strings.TrimSpace(line)
if len(line) == 0 {
// Skip blank lines
continue
}
if line[0] == ';' || line[0] == '#' {
// Skip comments
continue
}
if groups := assignRegex.FindStringSubmatch(line); groups != nil {
key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
if key == target {
return strings.Trim(val, "\"")
}
}
}
return ""
}
func getSELinuxPolicyRoot() string {
return selinuxDir + readConfig(selinuxTypeTag)
}
func readCon(name string) (string, error) {
var val string
in, err := os.Open(name)
if err != nil {
return "", err
}
defer in.Close()
_, err = fmt.Fscanf(in, "%s", &val)
return val, err
}
// Setfilecon sets the SELinux label for this path or returns an error.
func Setfilecon(path string, scon string) error {
return system.Lsetxattr(path, xattrNameSelinux, []byte(scon), 0)
}
// Getfilecon returns the SELinux label for this path or returns an error.
func Getfilecon(path string) (string, error) {
con, err := system.Lgetxattr(path, xattrNameSelinux)
// Trim the NUL byte at the end of the byte buffer, if present.
if len(con) > 0 && con[len(con)-1] == '\x00' {
con = con[:len(con)-1]
}
return string(con), err
}
// Setfscreatecon sets the SELinux label that the kernel will apply to files
// created by the current thread, or returns an error.
func Setfscreatecon(scon string) error {
	return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()), scon)
}
// Getfscreatecon returns the SELinux label that will be applied to files
// created by the current thread, or an error.
func Getfscreatecon() (string, error) {
	return readCon(fmt.Sprintf("/proc/self/task/%d/attr/fscreate", syscall.Gettid()))
}
// Getcon returns the SELinux label of the current process thread, or an error.
func Getcon() (string, error) {
return readCon(fmt.Sprintf("/proc/self/task/%d/attr/current", syscall.Gettid()))
}
// Getpidcon returns the SELinux label of the given pid, or an error.
func Getpidcon(pid int) (string, error) {
return readCon(fmt.Sprintf("/proc/%d/attr/current", pid))
}
// Getexeccon returns the SELinux label that will be applied to the next
// program exec'd by the current thread, or an error.
func Getexeccon() (string, error) {
	return readCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()))
}
func writeCon(name string, val string) error {
out, err := os.OpenFile(name, os.O_WRONLY, 0)
if err != nil {
return err
}
defer out.Close()
if val != "" {
_, err = out.Write([]byte(val))
} else {
_, err = out.Write(nil)
}
return err
}
// Setexeccon sets the SELinux label that will be applied to the next program
// exec'd by the current thread, or returns an error.
func Setexeccon(scon string) error {
	return writeCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()), scon)
}
func (c SELinuxContext) Get() string {
return fmt.Sprintf("%s:%s:%s:%s", c["user"], c["role"], c["type"], c["level"])
}
func NewContext(scon string) SELinuxContext {
c := make(SELinuxContext)
if len(scon) != 0 {
con := strings.SplitN(scon, ":", 4)
c["user"] = con[0]
c["role"] = con[1]
c["type"] = con[2]
c["level"] = con[3]
}
return c
}
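// Illustrative note (not part of the vendored source): a label splits into
// exactly four fields, with everything after the third colon kept as the
// level. For example:
//
//	c := NewContext("system_u:object_r:svirt_sandbox_file_t:s0:c1,c2")
//	// c["user"]  == "system_u"
//	// c["role"]  == "object_r"
//	// c["type"]  == "svirt_sandbox_file_t"
//	// c["level"] == "s0:c1,c2"
//	// c.Get()    == "system_u:object_r:svirt_sandbox_file_t:s0:c1,c2"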
func ReserveLabel(scon string) {
if len(scon) != 0 {
con := strings.SplitN(scon, ":", 4)
mcsAdd(con[3])
}
}
func selinuxEnforcePath() string {
return fmt.Sprintf("%s/enforce", selinuxPath)
}
func SelinuxGetEnforce() int {
var enforce int
enforceS, err := readCon(selinuxEnforcePath())
if err != nil {
return -1
}
enforce, err = strconv.Atoi(string(enforceS))
if err != nil {
return -1
}
return enforce
}
func SelinuxSetEnforce(mode int) error {
return writeCon(selinuxEnforcePath(), fmt.Sprintf("%d", mode))
}
func SelinuxGetEnforceMode() int {
switch readConfig(selinuxTag) {
case "enforcing":
return Enforcing
case "permissive":
return Permissive
}
return Disabled
}
func mcsAdd(mcs string) error {
if mcsList[mcs] {
return fmt.Errorf("MCS Label already exists")
}
mcsList[mcs] = true
return nil
}
func mcsDelete(mcs string) {
mcsList[mcs] = false
}
func IntToMcs(id int, catRange uint32) string {
var (
SETSIZE = int(catRange)
TIER = SETSIZE
ORD = id
)
if id < 1 || id > 523776 {
return ""
}
for ORD > TIER {
ORD = ORD - TIER
TIER -= 1
}
TIER = SETSIZE - TIER
ORD = ORD + TIER
return fmt.Sprintf("s0:c%d,c%d", TIER, ORD)
}
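// Worked example (not part of the vendored source): IntToMcs maps an id onto
// a pair of MCS categories via a triangular walk, e.g. with catRange 1024:
//
//	IntToMcs(1, 1024)    == "s0:c0,c1"
//	IntToMcs(1025, 1024) == "s0:c1,c2"
//
// Ids outside 1..523776 (the number of unordered category pairs for 1024
// categories) yield an empty string.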
func uniqMcs(catRange uint32) string {
var (
n uint32
c1, c2 uint32
mcs string
)
for {
binary.Read(rand.Reader, binary.LittleEndian, &n)
c1 = n % catRange
binary.Read(rand.Reader, binary.LittleEndian, &n)
c2 = n % catRange
if c1 == c2 {
continue
} else {
if c1 > c2 {
t := c1
c1 = c2
c2 = t
}
}
mcs = fmt.Sprintf("s0:c%d,c%d", c1, c2)
if err := mcsAdd(mcs); err != nil {
continue
}
break
}
return mcs
}
func FreeLxcContexts(scon string) {
if len(scon) != 0 {
con := strings.SplitN(scon, ":", 4)
mcsDelete(con[3])
}
}
// GetLxcContexts returns the default process and file labels for containers, with a fresh MCS level applied to both.
func GetLxcContexts() (processLabel string, fileLabel string) {
var (
val, key string
bufin *bufio.Reader
)
if !SelinuxEnabled() {
return "", ""
}
lxcPath := fmt.Sprintf("%s/contexts/lxc_contexts", getSELinuxPolicyRoot())
in, err := os.Open(lxcPath)
if err != nil {
return "", ""
}
defer in.Close()
bufin = bufio.NewReader(in)
for done := false; !done; {
var line string
if line, err = bufin.ReadString('\n'); err != nil {
if err == io.EOF {
done = true
} else {
goto exit
}
}
line = strings.TrimSpace(line)
if len(line) == 0 {
// Skip blank lines
continue
}
if line[0] == ';' || line[0] == '#' {
// Skip comments
continue
}
if groups := assignRegex.FindStringSubmatch(line); groups != nil {
key, val = strings.TrimSpace(groups[1]), strings.TrimSpace(groups[2])
if key == "process" {
processLabel = strings.Trim(val, "\"")
}
if key == "file" {
fileLabel = strings.Trim(val, "\"")
}
}
}
if processLabel == "" || fileLabel == "" {
return "", ""
}
exit:
// mcs := IntToMcs(os.Getpid(), 1024)
mcs := uniqMcs(1024)
scon := NewContext(processLabel)
scon["level"] = mcs
processLabel = scon.Get()
scon = NewContext(fileLabel)
scon["level"] = mcs
fileLabel = scon.Get()
return processLabel, fileLabel
}
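// Illustrative note (not part of the vendored source): the lxc_contexts file
// parsed above typically contains entries along the lines of
//
//	process = "system_u:system_r:svirt_lxc_net_t:s0"
//	file = "system_u:object_r:svirt_sandbox_file_t:s0"
//
// (the exact types depend on the installed policy); GetLxcContexts then
// swaps the level of both labels for a freshly reserved MCS pair such as
// "s0:c12,c544".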
// SecurityCheckContext asks the kernel to validate a security context by
// writing it to the selinuxfs "context" file; invalid contexts are rejected.
func SecurityCheckContext(val string) error {
	return writeCon(fmt.Sprintf("%s/context", selinuxPath), val)
}
// CopyLevel copies the MCS/MLS level from src onto dest and returns the resulting label.
func CopyLevel(src, dest string) (string, error) {
if src == "" {
return "", nil
}
if err := SecurityCheckContext(src); err != nil {
return "", err
}
if err := SecurityCheckContext(dest); err != nil {
return "", err
}
scon := NewContext(src)
tcon := NewContext(dest)
mcsDelete(tcon["level"])
mcsAdd(scon["level"])
tcon["level"] = scon["level"]
return tcon.Get(), nil
}
// Prevent users from relabeling system files
func badPrefix(fpath string) error {
var badprefixes = []string{"/usr"}
for _, prefix := range badprefixes {
if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) {
return fmt.Errorf("Relabeling content in %s is not allowed.", prefix)
}
}
return nil
}
// Chcon changes the fpath file object to the SELinux label scon. If fpath is
// a directory and recurse is true, Chcon walks the directory tree, setting
// the label on every file it visits.
func Chcon(fpath string, scon string, recurse bool) error {
if scon == "" {
return nil
}
if err := badPrefix(fpath); err != nil {
return err
}
callback := func(p string, info os.FileInfo, err error) error {
return Setfilecon(p, scon)
}
if recurse {
return filepath.Walk(fpath, callback)
}
return Setfilecon(fpath, scon)
}
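// Illustrative usage (not part of the vendored source; the path and label
// are hypothetical):
//
//	err := Chcon("/var/lib/containers/demo/rootfs",
//		"system_u:object_r:svirt_sandbox_file_t:s0:c1,c2", true)
//
// recursively relabels the rootfs, while badPrefix keeps paths under /usr
// off limits.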
// DupSecOpt takes an SELinux process label and returns security options that
// will set the SELinux Type and Level for future container processes
func DupSecOpt(src string) []string {
if src == "" {
return nil
}
con := NewContext(src)
if con["user"] == "" ||
con["role"] == "" ||
con["type"] == "" ||
con["level"] == "" {
return nil
}
return []string{"label:user:" + con["user"],
"label:role:" + con["role"],
"label:type:" + con["type"],
"label:level:" + con["level"]}
}
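// Illustrative note (not part of the vendored source): for a process label of
// "system_u:system_r:svirt_lxc_net_t:s0:c1,c2", DupSecOpt returns
//
//	[]string{
//		"label:user:system_u",
//		"label:role:system_r",
//		"label:type:svirt_lxc_net_t",
//		"label:level:s0:c1,c2",
//	}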
// DisableSecOpt returns a security opt that can be used to disable SELinux
// labeling support for future container processes
func DisableSecOpt() []string {
return []string{"label:disable"}
}
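Taken together, a consumer of this vendored selinux package wires the helpers above up roughly as in the following sketch. It is illustrative only and not part of this commit; the rootfs path is hypothetical and the calls assume an SELinux-enabled host with the policy's lxc_contexts file installed.
package main
import (
	"fmt"
	"github.com/opencontainers/runc/libcontainer/selinux"
)
func main() {
	if !selinux.SelinuxEnabled() {
		fmt.Println("selinux is disabled; nothing to label")
		return
	}
	// Pick default container labels from the policy, sharing one MCS pair.
	processLabel, fileLabel := selinux.GetLxcContexts()
	fmt.Println("process label:", processLabel)
	fmt.Println("file label:   ", fileLabel)
	// Recursively relabel a (hypothetical) bundle rootfs so the container
	// process can read and write it.
	if err := selinux.Chcon("/var/lib/containers/demo/rootfs", fileLabel, true); err != nil {
		fmt.Println("relabel failed:", err)
	}
	// Release the reserved MCS pair once the container is gone.
	selinux.FreeLxcContexts(processLabel)
}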

View file

@@ -0,0 +1,75 @@
// +build linux,selinux
package selinux_test
import (
"os"
"testing"
"github.com/opencontainers/runc/libcontainer/selinux"
)
func TestSetfilecon(t *testing.T) {
if selinux.SelinuxEnabled() {
tmp := "selinux_test"
out, _ := os.OpenFile(tmp, os.O_WRONLY|os.O_CREATE, 0)
out.Close()
err := selinux.Setfilecon(tmp, "system_u:object_r:bin_t:s0")
if err != nil {
t.Log("Setfilecon failed")
t.Fatal(err)
}
os.Remove(tmp)
}
}
func TestSELinux(t *testing.T) {
var (
err error
plabel, flabel string
)
if selinux.SelinuxEnabled() {
t.Log("Enabled")
plabel, flabel = selinux.GetLxcContexts()
t.Log(plabel)
t.Log(flabel)
selinux.FreeLxcContexts(plabel)
plabel, flabel = selinux.GetLxcContexts()
t.Log(plabel)
t.Log(flabel)
selinux.FreeLxcContexts(plabel)
t.Log("getenforce ", selinux.SelinuxGetEnforce())
mode := selinux.SelinuxGetEnforceMode()
t.Log("getenforcemode ", mode)
defer selinux.SelinuxSetEnforce(mode)
if err := selinux.SelinuxSetEnforce(selinux.Enforcing); err != nil {
t.Fatalf("enforcing selinux failed: %v", err)
}
if err := selinux.SelinuxSetEnforce(selinux.Permissive); err != nil {
t.Fatalf("setting selinux mode to permissive failed: %v", err)
}
selinux.SelinuxSetEnforce(mode)
pid := os.Getpid()
t.Logf("PID:%d MCS:%s\n", pid, selinux.IntToMcs(pid, 1023))
err = selinux.Setfscreatecon("unconfined_u:unconfined_r:unconfined_t:s0")
if err == nil {
t.Log(selinux.Getfscreatecon())
} else {
t.Log("setfscreatecon failed", err)
t.Fatal(err)
}
err = selinux.Setfscreatecon("")
if err == nil {
t.Log(selinux.Getfscreatecon())
} else {
t.Log("setfscreatecon failed", err)
t.Fatal(err)
}
t.Log(selinux.Getpidcon(1))
} else {
t.Log("Disabled")
}
}

View file

@@ -6,6 +6,7 @@ import (
"os"
"github.com/opencontainers/runc/libcontainer/apparmor"
"github.com/opencontainers/runc/libcontainer/keys"
"github.com/opencontainers/runc/libcontainer/label"
"github.com/opencontainers/runc/libcontainer/seccomp"
"github.com/opencontainers/runc/libcontainer/system"
@@ -18,6 +19,10 @@ type linuxSetnsInit struct {
}
func (l *linuxSetnsInit) Init() error {
// do not inherit the parent's session keyring
if _, err := keyctl.JoinSessionKeyring("_ses"); err != nil {
return err
}
if err := setupRlimits(l.config.Config); err != nil {
return err
}

View file

@@ -0,0 +1,27 @@
package stacktrace
import "testing"
func captureFunc() Stacktrace {
return Capture(0)
}
func TestCaptureTestFunc(t *testing.T) {
stack := captureFunc()
if len(stack.Frames) == 0 {
t.Fatal("expected stack frames to be returned")
}
// the first frame is the caller
frame := stack.Frames[0]
if expected := "captureFunc"; frame.Function != expected {
t.Fatalf("expteced function %q but recevied %q", expected, frame.Function)
}
if expected := "github.com/opencontainers/runc/libcontainer/stacktrace"; frame.Package != expected {
t.Fatalf("expected package %q but received %q", expected, frame.Package)
}
if expected := "capture_test.go"; frame.File != expected {
t.Fatalf("expected file %q but received %q", expected, frame.File)
}
}

View file

@@ -0,0 +1,20 @@
package stacktrace
import "testing"
func TestParsePackageName(t *testing.T) {
var (
name = "github.com/opencontainers/runc/libcontainer/stacktrace.captureFunc"
expectedPackage = "github.com/opencontainers/runc/libcontainer/stacktrace"
expectedFunction = "captureFunc"
)
pack, funcName := parseFunctionName(name)
if pack != expectedPackage {
t.Fatalf("expected package %q but received %q", expectedPackage, pack)
}
if funcName != expectedFunction {
t.Fatalf("expected function %q but received %q", expectedFunction, funcName)
}
}

View file

@@ -3,22 +3,37 @@
package libcontainer
import (
"io"
"os"
"syscall"
"github.com/opencontainers/runc/libcontainer/apparmor"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/keys"
"github.com/opencontainers/runc/libcontainer/label"
"github.com/opencontainers/runc/libcontainer/seccomp"
"github.com/opencontainers/runc/libcontainer/system"
)
type linuxStandardInit struct {
pipe io.ReadWriter
parentPid int
config *initConfig
}
func (l *linuxStandardInit) Init() error {
// do not inherit the parent's session keyring
sessKeyId, err := keyctl.JoinSessionKeyring("")
if err != nil {
return err
}
	// make the session keyring searchable
// without user ns we need 'UID' search permissions
// with user ns we need 'other' search permissions
if err := keyctl.ModKeyringPerm(sessKeyId, 0xffffffff, 0x080008); err != nil {
return err
}
// join any namespaces via a path to the namespace fd if provided
if err := joinExistingNamespaces(l.config.Config.Namespaces); err != nil {
return err
@@ -50,7 +65,6 @@ func (l *linuxStandardInit) Init() error {
if err := setOomScoreAdj(l.config.Config.OomScoreAdj); err != nil {
return err
}
label.Init()
// InitializeMountNamespace() can be executed only for a new mount namespace
if l.config.Config.Namespaces.Contains(configs.NEWNS) {
@@ -75,7 +89,6 @@ func (l *linuxStandardInit) Init() error {
return err
}
}
for _, path := range l.config.Config.ReadonlyPaths {
if err := remountReadonly(path); err != nil {
return err
@@ -90,6 +103,12 @@ func (l *linuxStandardInit) Init() error {
if err != nil {
return err
}
// Tell our parent that we're ready to Execv. This must be done before the
// Seccomp rules have been applied, because we need to be able to read and
// write to a socket.
if err := syncParentReady(l.pipe); err != nil {
return err
}
if l.config.Config.Seccomp != nil {
if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil {
return err

View file

@@ -49,6 +49,7 @@ func destroy(c *linuxContainer) error {
if herr := runPoststopHooks(c); err == nil {
err = herr
}
c.state = &stoppedState{c: c}
return err
}
@@ -119,7 +120,7 @@ func (r *runningState) transition(s containerState) error {
case *pausedState:
r.c.state = s
return nil
case *runningState, *nullState:
case *runningState:
return nil
}
return newStateTransitionError(r, s)
@@ -148,7 +149,7 @@ func (p *pausedState) status() Status {
func (p *pausedState) transition(s containerState) error {
switch s.(type) {
case *runningState:
case *runningState, *stoppedState:
p.c.state = s
return nil
case *pausedState:
@@ -158,6 +159,16 @@ func (p *pausedState) transition(s containerState) error {
}
func (p *pausedState) destroy() error {
isRunning, err := p.c.isRunning()
if err != nil {
return err
}
if !isRunning {
if err := p.c.cgroupManager.Freeze(configs.Thawed); err != nil {
return err
}
return destroy(p.c)
}
return newGenericError(fmt.Errorf("container is paused"), ContainerPaused)
}
@@ -191,27 +202,25 @@ func (r *restoredState) destroy() error {
return destroy(r.c)
}
// nullState is used whenever a container is restored, loaded, or setting additional
// createdState is used whenever a container is restored, loaded, or setting additional
// processes inside and it should not be destroyed when it is exiting.
type nullState struct {
type createdState struct {
c *linuxContainer
s Status
}
func (n *nullState) status() Status {
func (n *createdState) status() Status {
return n.s
}
func (n *nullState) transition(s containerState) error {
switch s.(type) {
case *restoredState:
n.c.state = s
default:
// do nothing for null states
}
func (n *createdState) transition(s containerState) error {
n.c.state = s
return nil
}
func (n *nullState) destroy() error {
return nil
func (n *createdState) destroy() error {
if err := n.c.refreshState(); err != nil {
return err
}
return n.c.state.destroy()
}

View file

@@ -0,0 +1,79 @@
// +build linux
package libcontainer
import "testing"
func TestStateStatus(t *testing.T) {
states := map[containerState]Status{
&stoppedState{}: Destroyed,
&runningState{}: Running,
&restoredState{}: Running,
&pausedState{}: Paused,
}
for s, status := range states {
if s.status() != status {
t.Fatalf("state returned %s but expected %s", s.status(), status)
}
}
}
func isStateTransitionError(err error) bool {
_, ok := err.(*stateTransitionError)
return ok
}
func TestStoppedStateTransition(t *testing.T) {
s := &stoppedState{c: &linuxContainer{}}
valid := []containerState{
&stoppedState{},
&runningState{},
&restoredState{},
}
for _, v := range valid {
if err := s.transition(v); err != nil {
t.Fatal(err)
}
}
err := s.transition(&pausedState{})
if err == nil {
t.Fatal("transition to paused state should fail")
}
if !isStateTransitionError(err) {
t.Fatal("expected stateTransitionError")
}
}
func TestPausedStateTransition(t *testing.T) {
s := &pausedState{c: &linuxContainer{}}
valid := []containerState{
&pausedState{},
&runningState{},
&stoppedState{},
}
for _, v := range valid {
if err := s.transition(v); err != nil {
t.Fatal(err)
}
}
}
func TestRestoredStateTransition(t *testing.T) {
s := &restoredState{c: &linuxContainer{}}
valid := []containerState{
&stoppedState{},
&runningState{},
}
for _, v := range valid {
if err := s.transition(v); err != nil {
t.Fatal(err)
}
}
err := s.transition(&createdState{})
if err == nil {
t.Fatal("transition to created state should fail")
}
if !isStateTransitionError(err) {
t.Fatal("expected stateTransitionError")
}
}

View file

@@ -3,6 +3,9 @@
package system
import (
"bufio"
"fmt"
"os"
"os/exec"
"syscall"
"unsafe"
@@ -75,3 +78,37 @@ func Setctty() error {
}
return nil
}
/*
* Detect whether we are currently running in a user namespace.
* Copied from github.com/lxc/lxd/shared/util.go
*/
func RunningInUserNS() bool {
file, err := os.Open("/proc/self/uid_map")
if err != nil {
/*
* This kernel-provided file only exists if user namespaces are
* supported
*/
return false
}
defer file.Close()
buf := bufio.NewReader(file)
l, _, err := buf.ReadLine()
if err != nil {
return false
}
line := string(l)
var a, b, c int64
fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
/*
* We assume we are in the initial user namespace if we have a full
* range - 4294967295 uids starting at uid 0.
*/
if a == 0 && b == 0 && c == 4294967295 {
return false
}
return true
}
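// Illustrative usage (not part of the vendored source): a caller might use
// this to skip work that only makes sense in the initial user namespace,
// for example:
//
//	if system.RunningInUserNS() {
//		// running inside a user namespace: skip operations that need
//		// full root, such as creating device nodes
//	}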

Some files were not shown because too many files have changed in this diff.