package main

import (
	"context"
	"fmt"
	"io"
	"os"
	"sync"
	"text/tabwriter"
	"time"

	contentapi "github.com/docker/containerd/api/services/content"
	"github.com/docker/containerd/images"
	"github.com/docker/containerd/log"
	"github.com/docker/containerd/progress"
	"github.com/docker/containerd/remotes"
	contentservice "github.com/docker/containerd/services/content"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/urfave/cli"
	"golang.org/x/sync/errgroup"
)

var fetchCommand = cli.Command{
	Name:      "fetch",
	Usage:     "fetch all content for an image into containerd",
	ArgsUsage: "[flags] <remote> <object>",
	Description: `Fetch an image into containerd.

This command ensures that containerd has all the necessary resources to build
an image's rootfs and convert the configuration to a runtime format supported
by containerd.

This command uses the same syntax, of remote and object, as 'dist
fetch-object'. We may want to make this nicer, but agnosticism is preferred for
the moment.

Right now, the responsibilities of the daemon and the cli aren't quite clear. Do
not use this implementation as a guide. The end goal should be having metadata,
content and snapshots ready for direct use via 'ctr run'.
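
For example, fetching a full image by reference:

	dist fetch docker.io/library/redis:latest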

Most of this is experimental and there are a few leaps to make this work.`,
	Flags: []cli.Flag{},
	Action: func(clicontext *cli.Context) error {
		var (
			ctx = background
			ref = clicontext.Args().First()
		)

		conn, err := connectGRPC(clicontext)
		if err != nil {
			return err
		}

		resolver, err := getResolver(ctx)
		if err != nil {
			return err
		}

		ongoing := newJobs()
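
		// The ingester receives fetched blobs into the content store over
		// GRPC, while the provider reads them back out so that child
		// descriptors can be discovered during the walk.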
		ingester := contentservice.NewIngesterFromClient(contentapi.NewContentClient(conn))
		provider := contentservice.NewProviderFromClient(contentapi.NewContentClient(conn))

		// TODO(stevvooe): Need to replace this with content store client.
		cs, err := resolveContentStore(clicontext)
		if err != nil {
			return err
		}

		eg, ctx := errgroup.WithContext(ctx)
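
		// Resolution runs in the errgroup: Resolve turns the opaque ref into
		// a canonical name, a descriptor and a Fetcher for the object and its
		// children. The resolved channel lets the progress loop below report
		// "resolving" vs "resolved" for the primary ref.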
		resolved := make(chan struct{})
		eg.Go(func() error {
			ongoing.add(ref)
			name, desc, fetcher, err := resolver.Resolve(ctx, ref)
			if err != nil {
				return err
			}
			log.G(ctx).WithField("image", name).Debug("fetching")
			close(resolved)
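
			// Dispatch walks the resolved image: the first handler registers
			// each descriptor as a progress job, FetchHandler pulls its
			// content into the ingester and ChildrenHandler queues any child
			// descriptors for the next round of the walk.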
			return images.Dispatch(ctx,
				images.Handlers(
					images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
						ongoing.add(remotes.MakeRefKey(ctx, desc))
						return nil, nil
					}),
					remotes.FetchHandler(ingester, fetcher),
					images.ChildrenHandler(provider),
				),
				desc)
		})
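
		// Surface the errgroup result on a channel so the progress loop can
		// select on completion alongside the render ticker.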
		errs := make(chan error)
		go func() {
			defer close(errs)
			errs <- eg.Wait()
		}()
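
		// Redraw progress every 100ms until the fetch finishes or the
		// context is cancelled; done allows one final frame to render.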
		ticker := time.NewTicker(100 * time.Millisecond)
		fw := progress.NewWriter(os.Stdout)
		start := time.Now()
		defer ticker.Stop()
		var done bool

		for {
			select {
			case <-ticker.C:
				fw.Flush()

				tw := tabwriter.NewWriter(fw, 1, 8, 1, ' ', 0)

				statuses := map[string]statusInfo{}

				activeSeen := map[string]struct{}{}
				if !done {
					active, err := cs.Active()
					if err != nil {
						log.G(ctx).WithError(err).Error("active check failed")
						continue
					}
					// update status of active entries!
					for _, active := range active {
						statuses[active.Ref] = statusInfo{
							Ref:       active.Ref,
							Status:    "downloading",
							Offset:    active.Offset,
							Total:     active.Total,
							StartedAt: active.StartedAt,
							UpdatedAt: active.UpdatedAt,
						}
						activeSeen[active.Ref] = struct{}{}
					}
				}

				js := ongoing.jobs()

				// now, update the items in jobs that are not in active
				for _, j := range js {
					if _, ok := activeSeen[j]; ok {
						continue
					}

					status := "done"

					if j == ref {
						select {
						case <-resolved:
							status = "resolved"
						default:
							status = "resolving"
						}
					}

					statuses[j] = statusInfo{
						Ref:    j,
						Status: status, // for now!
					}
				}

				var ordered []statusInfo
				for _, j := range js {
					ordered = append(ordered, statuses[j])
				}

				display(tw, ordered, start)
				tw.Flush()

				if done {
					fw.Flush()
					return nil
				}
			case err := <-errs:
				if err != nil {
					return err
				}
				done = true
			case <-ctx.Done():
				done = true // allow ui to update once more
			}
		}

		return nil
	},
}

// jobs provides a way of identifying the download keys for a particular task
// encountered during the pull walk.
//
// This is very minimal and will probably be replaced with something more
// fully featured.
type jobs struct {
	added map[string]struct{}
	refs  []string
	mu    sync.Mutex
}

func newJobs() *jobs {
	return &jobs{added: make(map[string]struct{})}
}
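
// add records a ref the first time it is seen, preserving insertion order.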
func (j *jobs) add(ref string) {
	j.mu.Lock()
	defer j.mu.Unlock()

	if _, ok := j.added[ref]; ok {
		return
	}
	j.refs = append(j.refs, ref)
	j.added[ref] = struct{}{}
}
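
// jobs returns a snapshot of the refs seen so far, in insertion order.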
func (j *jobs) jobs() []string {
	j.mu.Lock()
	defer j.mu.Unlock()

	var jobs []string
	for _, j := range j.refs {
		jobs = append(jobs, j)
	}

	return jobs
}
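
// statusInfo describes the progress of a single fetch job for display.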
type statusInfo struct {
	Ref       string
	Status    string
	Offset    int64
	Total     int64
	StartedAt time.Time
	UpdatedAt time.Time
}
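
// display writes one tab-aligned progress row per status, followed by a
// summary line with elapsed time and overall throughput.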
func display(w io.Writer, statuses []statusInfo, start time.Time) {
	var total int64
	for _, status := range statuses {
		total += status.Offset
		switch status.Status {
		case "downloading":
			bar := progress.Bar(float64(status.Offset) / float64(status.Total))
			fmt.Fprintf(w, "%s:\t%s\t%40r\t%8.8s/%s\t\n",
				status.Ref,
				status.Status,
				bar,
				progress.Bytes(status.Offset), progress.Bytes(status.Total))
		case "resolving":
			bar := progress.Bar(0.0)
			fmt.Fprintf(w, "%s:\t%s\t%40r\t\n",
				status.Ref,
				status.Status,
				bar)
		default:
			bar := progress.Bar(1.0)
			fmt.Fprintf(w, "%s:\t%s\t%40r\t\n",
				status.Ref,
				status.Status,
				bar)
		}
	}

	fmt.Fprintf(w, "elapsed: %-4.1fs\ttotal: %7.6v\t(%v)\t\n",
		time.Since(start).Seconds(),
		// TODO(stevvooe): These calculations are actually way off.
		// Need to account for previously downloaded data. These
		// will basically be right for a download the first time
		// but will be skewed if restarting, as it includes the
		// data into the start time before.
		progress.Bytes(total),
		progress.NewBytesPerSecond(total, time.Since(start)))
}