kpod: shut down the storage library before exiting

Before exiting, have kpod shut down the storage library if it can. This
should keep us from leaving mountpoints for the root (for non-vfs cases)
and run directory (with newer containers/storage) busy when testing kpod.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>

parent 3a2518ce33
commit ef8df00e6a

8 changed files with 33 additions and 2 deletions
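The change follows one pattern throughout: every storage.Store that kpod opens is remembered in a package-level set, and both of urfave/cli's exit paths drain that set before the process ends, with app.After covering command success and cli.OsExiter covering failures that bypass After. Below is a minimal, self-contained sketch of that pattern, not kpod's actual code; the program name and flag are made up, and it assumes the containers/storage and urfave/cli v1 APIs of roughly this vintage.

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage"
	"github.com/urfave/cli"
)

// Every store the tool opens is registered here so that it can be shut
// down on any exit path, unmounting the graph root and run directory.
var stores = make(map[storage.Store]struct{})

func openStore(graphRoot string) (storage.Store, error) {
	options := storage.DefaultStoreOptions
	if graphRoot != "" {
		options.GraphRoot = graphRoot
	}
	store, err := storage.GetStore(options)
	if err != nil {
		return nil, err
	}
	stores[store] = struct{}{}
	return store, nil
}

func shutdownStores() {
	for store := range stores {
		// force=false: layers still in use by containers are left alone.
		if _, err := store.Shutdown(false); err != nil {
			break
		}
	}
}

func main() {
	app := cli.NewApp()
	app.Name = "storedemo" // hypothetical tool name
	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "root", Usage: "storage graph root"},
	}
	app.Action = func(c *cli.Context) error {
		store, err := openStore(c.String("root"))
		if err != nil {
			return err
		}
		fmt.Println("graph root:", store.GraphRoot())
		return nil
	}
	// Run() invokes After once the command handler succeeds.
	app.After = func(*cli.Context) error {
		shutdownStores()
		return nil
	}
	// Failures exit through OsExiter, bypassing After, so cover that too.
	cli.OsExiter = func(code int) {
		shutdownStores()
		os.Exit(code)
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		cli.OsExiter(1)
	}
}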
@@ -10,6 +10,10 @@ import (
 	"github.com/urfave/cli"
 )
 
+var (
+	stores = make(map[storage.Store]struct{})
+)
+
 func getStore(c *libkpod.Config) (storage.Store, error) {
 	options := storage.DefaultStoreOptions
 	options.GraphRoot = c.Root
@@ -22,9 +26,18 @@ func getStore(c *libkpod.Config) (storage.Store, error) {
 		return nil, err
 	}
 	is.Transport.SetStore(store)
+	stores[store] = struct{}{}
 	return store, nil
 }
 
+func shutdownStores() {
+	for store := range stores {
+		if _, err := store.Shutdown(false); err != nil {
+			break
+		}
+	}
+}
+
 func getConfig(c *cli.Context) (*libkpod.Config, error) {
 	config := libkpod.DefaultConfig()
 	if c.GlobalIsSet("config") {
@@ -86,6 +86,7 @@ func diffCmd(c *cli.Context) error {
 	if err != nil {
 		return errors.Wrapf(err, "could not get container server")
 	}
+	defer server.Shutdown()
 
 	to := c.Args().Get(0)
 	changes, err := server.GetDiff("", to)
@@ -71,6 +71,7 @@ func inspectCmd(c *cli.Context) error {
 	if err != nil {
 		return errors.Wrapf(err, "could not get container server")
 	}
+	defer server.Shutdown()
 	if err = server.Update(); err != nil {
 		return errors.Wrapf(err, "could not update list of containers")
 	}
@@ -69,6 +69,7 @@ func logsCmd(c *cli.Context) error {
 	if err != nil {
 		return errors.Wrapf(err, "could not create container server")
 	}
+	defer server.Shutdown()
 	err = server.Update()
 	if err != nil {
 		return errors.Wrapf(err, "could not update list of containers")
@@ -50,6 +50,16 @@ func main() {
 		}
 		return nil
 	}
+	app.After = func(*cli.Context) error {
+		// called by Run() when the command handler succeeds
+		shutdownStores()
+		return nil
+	}
+	cli.OsExiter = func(code int) {
+		// called by Run() when the command fails, bypassing After()
+		shutdownStores()
+		os.Exit(code)
+	}
 	app.Flags = []cli.Flag{
 		cli.StringFlag{
 			Name:  "config, c",
@@ -82,6 +92,6 @@ func main() {
 	}
 	if err := app.Run(os.Args); err != nil {
 		logrus.Errorf(err.Error())
-		os.Exit(1)
+		cli.OsExiter(1)
 	}
 }
@@ -32,6 +32,7 @@ func renameCmd(c *cli.Context) error {
 	if err != nil {
 		return errors.Wrapf(err, "could not get container server")
 	}
+	defer server.Shutdown()
 	err = server.Update()
 	if err != nil {
 		return errors.Wrapf(err, "could not update list of containers")
@@ -70,6 +70,7 @@ func statsCmd(c *cli.Context) error {
 	if err != nil {
 		return errors.Wrapf(err, "could not create container server")
 	}
+	defer containerServer.Shutdown()
 	err = containerServer.Update()
 	if err != nil {
 		return errors.Wrapf(err, "could not update list of containers")
@@ -570,8 +570,11 @@ func (c *ContainerServer) ReleasePodName(name string) {
 // Shutdown attempts to shut down the server's storage cleanly
 func (c *ContainerServer) Shutdown() error {
 	_, err := c.store.Shutdown(false)
-	return err
+	if err != nil && errors.Cause(err) != cstorage.ErrLayerUsedByContainer {
+		return err
+	}
+	return nil
 }
 
 type containerServerState struct {
 	containers oci.ContainerStorer