1
0
mirror of https://github.com/drone/drone-cli.git synced 2026-01-16 16:01:35 +01:00

bump the drone runtime version

This commit is contained in:
Brad Rydzewski 2018-01-10 13:33:08 -08:00
parent c70270d38e
commit 1cb942f726
12 changed files with 385 additions and 94 deletions

@ -44,16 +44,22 @@ func toHostConfig(proc *backend.Step) *container.HostConfig {
Memory: proc.MemLimit,
MemorySwap: proc.MemSwapLimit,
},
LogConfig: container.LogConfig{
Type: "json-file",
},
Privileged: proc.Privileged,
ShmSize: proc.ShmSize,
Sysctls: proc.Sysctls,
}
// if len(proc.VolumesFrom) != 0 {
// config.VolumesFrom = proc.VolumesFrom
// }
if len(proc.NetworkMode) != 0 {
config.NetworkMode = container.NetworkMode(
proc.NetworkMode,
)
config.NetworkMode = container.NetworkMode(proc.NetworkMode)
}
if len(proc.IpcMode) != 0 {
config.IpcMode = container.IpcMode(proc.IpcMode)
}
if len(proc.DNS) != 0 {
config.DNS = proc.DNS
@ -70,6 +76,15 @@ func toHostConfig(proc *backend.Step) *container.HostConfig {
if len(proc.Volumes) != 0 {
config.Binds = proc.Volumes
}
config.Tmpfs = map[string]string{}
for _, path := range proc.Tmpfs {
if strings.Index(path, ":") == -1 {
config.Tmpfs[path] = ""
continue
}
parts := strings.Split(path, ":")
config.Tmpfs[parts[0]] = parts[1]
}
// if proc.OomKillDisable {
// config.OomKillDisable = &proc.OomKillDisable
// }

@ -31,6 +31,7 @@ type (
Command []string `json:"command,omitempty"`
ExtraHosts []string `json:"extra_hosts,omitempty"`
Volumes []string `json:"volumes,omitempty"`
Tmpfs []string `json:"tmpfs,omitempty"`
Devices []string `json:"devices,omitempty"`
Networks []Conn `json:"networks,omitempty"`
DNS []string `json:"dns,omitempty"`
@ -45,6 +46,8 @@ type (
OnSuccess bool `json:"on_success,omitempty"`
AuthConfig Auth `json:"auth_config,omitempty"`
NetworkMode string `json:"network_mode,omitempty"`
IpcMode string `json:"ipc_mode,omitempty"`
Sysctls map[string]string `json:"sysctls,omitempty"`
}
// Auth defines registry authentication credentials.

@ -33,6 +33,7 @@ type (
Remote string `json:"remote,omitempty"`
Private bool `json:"private,omitempty"`
Secrets []Secret `json:"secrets,omitempty"`
Branch string `json:"default_branch,omitempty"`
}
// Build defines runtime metadata for a build.
@ -181,7 +182,7 @@ func (m *Metadata) EnvironDrone() map[string]string {
"DRONE_REPO_OWNER": owner,
"DRONE_REPO_NAME": name,
"DRONE_REPO_LINK": m.Repo.Link,
"DRONE_REPO_BRANCH": m.Curr.Commit.Branch,
"DRONE_REPO_BRANCH": m.Repo.Branch,
"DRONE_REPO_PRIVATE": fmt.Sprintf("%v", m.Repo.Private),
"DRONE_REPO_TRUSTED": "false", // TODO should this be added?
"DRONE_REMOTE_URL": m.Repo.Remote,
@ -211,7 +212,7 @@ func (m *Metadata) EnvironDrone() map[string]string {
"DRONE_PREV_BUILD_NUMBER": fmt.Sprintf("%v", m.Prev.Number),
"DRONE_PREV_COMMIT_SHA": m.Prev.Commit.Sha,
}
if m.Curr.Event == EventTag {
if m.Curr.Event == EventTag || strings.HasPrefix(m.Curr.Commit.Ref, "refs/tags/") {
params["DRONE_TAG"] = strings.TrimPrefix(m.Curr.Commit.Ref, "refs/tags/")
}
if m.Curr.Event == EventPull {

@ -0,0 +1,105 @@
package compiler
import (
"path"
"strings"
"github.com/cncd/pipeline/pipeline/frontend/yaml"
libcompose "github.com/docker/libcompose/yaml"
)
// Cacher defines a compiler transform that can be used
// to implement default caching for a repository.
//
// Restore produces the pipeline step that downloads a previously
// stored cache; Rebuild produces the step that packs and stores the
// cache again after the build. Both are keyed by repository, branch
// and the list of mount paths to cache.
type Cacher interface {
// Restore returns the container configuration for the cache-restore step.
Restore(repo, branch string, mounts []string) *yaml.Container
// Rebuild returns the container configuration for the cache-rebuild step.
Rebuild(repo, branch string, mounts []string) *yaml.Container
}
// volumeCacher implements Cacher using the plugins/volume-cache
// image, storing per-repository cache archives on a host volume.
type volumeCacher struct {
base string // host path under which per-repository cache directories live
}
// Restore returns a step that restores the volume-backed cache for the
// given repository and branch into the provided mount paths. The archive
// file name is derived from the branch, falling back to the master
// branch archive when no branch-specific archive exists.
func (c *volumeCacher) Restore(repo, branch string, mounts []string) *yaml.Container {
	return &yaml.Container{
		// FIX: was "rebuild_cache", an apparent copy-paste from Rebuild;
		// named distinctly so the restore step is identifiable (matches
		// the "restore_cache" stage alias used by the compiler).
		Name:  "restore_cache",
		Image: "plugins/volume-cache:1.0.0",
		Vargs: map[string]interface{}{
			"mount":   mounts,
			"path":    "/cache",
			"restore": true,
			// branch names may contain slashes; flatten to a flat file name.
			"file":        strings.Replace(branch, "/", "_", -1) + ".tar",
			"fallback_to": "master.tar",
		},
		Volumes: libcompose.Volumes{
			Volumes: []*libcompose.Volume{
				{
					Source:      path.Join(c.base, repo),
					Destination: "/cache",
					// TODO add access mode
				},
			},
		},
	}
}
// Rebuild returns a step that repacks the cache for the given
// repository and branch and flushes stale archives from the volume.
func (c *volumeCacher) Rebuild(repo, branch string, mounts []string) *yaml.Container {
	// branch names may contain slashes; flatten to a flat file name.
	file := strings.Replace(branch, "/", "_", -1) + ".tar"
	args := map[string]interface{}{
		"mount":   mounts,
		"path":    "/cache",
		"rebuild": true,
		"flush":   true,
		"file":    file,
	}
	vol := &libcompose.Volume{
		Source:      path.Join(c.base, repo),
		Destination: "/cache",
		// TODO add access mode
	}
	return &yaml.Container{
		Name:    "rebuild_cache",
		Image:   "plugins/volume-cache:1.0.0",
		Vargs:   args,
		Volumes: libcompose.Volumes{Volumes: []*libcompose.Volume{vol}},
	}
}
// s3Cacher implements Cacher using the plugins/s3-cache image,
// storing cache archives in an Amazon S3 bucket.
type s3Cacher struct {
bucket string // target bucket name
access string // access key id
secret string // secret access key
region string // bucket region
}
// Restore returns a step that downloads the cached mount paths from
// the configured S3 bucket.
func (c *s3Cacher) Restore(repo, branch string, mounts []string) *yaml.Container {
	return &yaml.Container{
		// FIX: was "rebuild_cache", an apparent copy-paste from Rebuild;
		// named distinctly so the restore step is identifiable.
		Name:  "restore_cache",
		Image: "plugins/s3-cache:latest",
		Vargs: map[string]interface{}{
			"mount":      mounts,
			"access_key": c.access,
			"secret_key": c.secret,
			"bucket":     c.bucket,
			"region":     c.region,
			// FIX: previously set "rebuild": true — a copy-paste from
			// Rebuild that told the plugin to upload instead of download.
			"restore": true,
		},
	}
}
// Rebuild returns a step that uploads the cached mount paths to the
// configured S3 bucket and flushes stale archives.
func (c *s3Cacher) Rebuild(repo, branch string, mounts []string) *yaml.Container {
	args := map[string]interface{}{
		"mount":      mounts,
		"access_key": c.access,
		"secret_key": c.secret,
		"bucket":     c.bucket,
		"region":     c.region,
		"rebuild":    true,
		"flush":      true,
	}
	return &yaml.Container{
		Name:  "rebuild_cache",
		Image: "plugins/s3-cache:latest",
		Vargs: args,
	}
}

@ -6,7 +6,6 @@ import (
"github.com/cncd/pipeline/pipeline/backend"
"github.com/cncd/pipeline/pipeline/frontend"
"github.com/cncd/pipeline/pipeline/frontend/yaml"
// libcompose "github.com/docker/libcompose/yaml"
)
// TODO(bradrydzewski) compiler should handle user-defined volumes from YAML
@ -26,6 +25,15 @@ type Secret struct {
Match []string
}
// ResourceLimit defines default resource limits applied to every
// container in the pipeline. A zero value for a field means the
// container's own yaml setting is used instead; a non-zero value
// overrides the per-container setting.
type ResourceLimit struct {
MemSwapLimit int64
MemLimit int64
ShmSize int64
CPUQuota int64
CPUShares int64
CPUSet string
}
// Compiler compiles the yaml
type Compiler struct {
local bool
@ -39,6 +47,8 @@ type Compiler struct {
metadata frontend.Metadata
registries []Registry
secrets map[string]Secret
cacher Cacher
reslimit ResourceLimit
}
// New creates a new Compiler with options.
@ -86,8 +96,14 @@ func (c *Compiler) Compile(conf *yaml.Config) *backend.Config {
Image: "plugins/git:latest",
Vargs: map[string]interface{}{"depth": "0"},
}
switch c.metadata.Sys.Arch {
case "linux/arm":
container.Image = "plugins/git:linux-arm"
case "linux/arm64":
container.Image = "plugins/git:linux-arm64"
}
name := fmt.Sprintf("%s_clone", c.prefix)
step := c.createProcess(name, container)
step := c.createProcess(name, container, "clone")
stage := new(backend.Stage)
stage.Name = name
@ -105,13 +121,15 @@ func (c *Compiler) Compile(conf *yaml.Config) *backend.Config {
stage.Alias = container.Name
name := fmt.Sprintf("%s_clone_%d", c.prefix, i)
step := c.createProcess(name, container)
step := c.createProcess(name, container, "clone")
stage.Steps = append(stage.Steps, step)
config.Stages = append(config.Stages, stage)
}
}
c.setupCache(conf, config)
// add services steps
if len(conf.Services.Containers) != 0 {
stage := new(backend.Stage)
@ -124,7 +142,7 @@ func (c *Compiler) Compile(conf *yaml.Config) *backend.Config {
}
name := fmt.Sprintf("%s_services_%d", c.prefix, i)
step := c.createProcess(name, container)
step := c.createProcess(name, container, "services")
stage.Steps = append(stage.Steps, step)
}
@ -154,9 +172,45 @@ func (c *Compiler) Compile(conf *yaml.Config) *backend.Config {
}
name := fmt.Sprintf("%s_step_%d", c.prefix, i)
step := c.createProcess(name, container)
step := c.createProcess(name, container, "pipeline")
stage.Steps = append(stage.Steps, step)
}
c.setupCacheRebuild(conf, config)
return config
}
// setupCache appends a restore_cache stage to the intermediate
// representation. It is a no-op for local builds, when no cache paths
// are configured, or when no Cacher is installed.
func (c *Compiler) setupCache(conf *yaml.Config, ir *backend.Config) {
	if c.local || c.cacher == nil || len(conf.Cache) == 0 {
		return
	}
	name := fmt.Sprintf("%s_restore_cache", c.prefix)
	container := c.cacher.Restore(c.metadata.Repo.Name, c.metadata.Curr.Commit.Branch, conf.Cache)
	stage := &backend.Stage{
		Name:  name,
		Alias: "restore_cache",
	}
	stage.Steps = append(stage.Steps, c.createProcess(name, container, "cache"))
	ir.Stages = append(ir.Stages, stage)
}
// setupCacheRebuild appends a rebuild_cache stage to the intermediate
// representation. The stage is only added for push events on non-local
// builds that configure cache paths and have a Cacher installed.
func (c *Compiler) setupCacheRebuild(conf *yaml.Config, ir *backend.Config) {
	if c.local || c.cacher == nil || len(conf.Cache) == 0 || c.metadata.Curr.Event != "push" {
		return
	}
	name := fmt.Sprintf("%s_rebuild_cache", c.prefix)
	container := c.cacher.Rebuild(c.metadata.Repo.Name, c.metadata.Curr.Commit.Branch, conf.Cache)
	stage := &backend.Stage{
		Name:  name,
		Alias: "rebuild_cache",
	}
	stage.Steps = append(stage.Steps, c.createProcess(name, container, "cache"))
	ir.Stages = append(ir.Stages, stage)
}

@ -9,7 +9,7 @@ import (
"github.com/cncd/pipeline/pipeline/frontend/yaml"
)
func (c *Compiler) createProcess(name string, container *yaml.Container) *backend.Step {
func (c *Compiler) createProcess(name string, container *yaml.Container, section string) *backend.Step {
var (
detached bool
workingdir string
@ -20,6 +20,7 @@ func (c *Compiler) createProcess(name string, container *yaml.Container) *backen
command = container.Command
image = expandImage(container.Image)
network_mode = container.NetworkMode
ipc_mode = container.IpcMode
// network = container.Network
)
@ -62,30 +63,39 @@ func (c *Compiler) createProcess(name string, container *yaml.Container) *backen
// TODO: This is here for backward compatibility and will eventually be removed.
environment["DRONE_WORKSPACE"] = path.Join(c.base, c.path)
if !isService(container) {
workingdir = path.Join(c.base, c.path)
}
if isService(container) {
if section == "services" || container.Detached {
detached = true
}
if isPlugin(container) {
paramsToEnv(container.Vargs, environment)
if detached == false || len(container.Commands) != 0 {
workingdir = path.Join(c.base, c.path)
}
if matchImage(container.Image, c.escalated...) {
privileged = true
entrypoint = []string{}
command = []string{}
if detached == false {
paramsToEnv(container.Vargs, environment)
}
if len(container.Commands) != 0 {
if c.metadata.Sys.Arch == "windows/amd64" {
// TODO provide windows implementation
entrypoint = []string{"/bin/sh", "-c"}
command = []string{"echo $CI_SCRIPT | base64 -d | /bin/sh -e"}
environment["CI_SCRIPT"] = generateScriptWindows(container.Commands)
environment["HOME"] = "/root"
environment["SHELL"] = "/bin/sh"
} else {
entrypoint = []string{"/bin/sh", "-c"}
command = []string{"echo $CI_SCRIPT | base64 -d | /bin/sh -e"}
environment["CI_SCRIPT"] = generateScriptPosix(container.Commands)
environment["HOME"] = "/root"
environment["SHELL"] = "/bin/sh"
}
}
if isShell(container) {
entrypoint = []string{"/bin/sh", "-c"}
command = []string{"echo $CI_SCRIPT | base64 -d | /bin/sh -e"}
environment["CI_SCRIPT"] = generateScriptPosix(container.Commands)
environment["HOME"] = "/root"
environment["SHELL"] = "/bin/sh"
if matchImage(container.Image, c.escalated...) {
privileged = true
entrypoint = []string{}
command = []string{}
}
authConfig := backend.Auth{
@ -109,6 +119,31 @@ func (c *Compiler) createProcess(name string, container *yaml.Container) *backen
}
}
memSwapLimit := int64(container.MemSwapLimit)
if c.reslimit.MemSwapLimit != 0 {
memSwapLimit = c.reslimit.MemSwapLimit
}
memLimit := int64(container.MemLimit)
if c.reslimit.MemLimit != 0 {
memLimit = c.reslimit.MemLimit
}
shmSize := int64(container.ShmSize)
if c.reslimit.ShmSize != 0 {
shmSize = c.reslimit.ShmSize
}
cpuQuota := int64(container.CPUQuota)
if c.reslimit.CPUQuota != 0 {
cpuQuota = c.reslimit.CPUQuota
}
cpuShares := int64(container.CPUShares)
if c.reslimit.CPUShares != 0 {
cpuShares = c.reslimit.CPUShares
}
cpuSet := container.CPUSet
if c.reslimit.CPUSet != "" {
cpuSet = c.reslimit.CPUSet
}
return &backend.Step{
Name: name,
Alias: container.Name,
@ -123,33 +158,24 @@ func (c *Compiler) createProcess(name string, container *yaml.Container) *backen
Command: command,
ExtraHosts: container.ExtraHosts,
Volumes: volumes,
Tmpfs: container.Tmpfs,
Devices: container.Devices,
Networks: networks,
DNS: container.DNS,
DNSSearch: container.DNSSearch,
MemSwapLimit: int64(container.MemSwapLimit),
MemLimit: int64(container.MemLimit),
ShmSize: int64(container.ShmSize),
CPUQuota: int64(container.CPUQuota),
CPUShares: int64(container.CPUShares),
CPUSet: container.CPUSet,
MemSwapLimit: memSwapLimit,
MemLimit: memLimit,
ShmSize: shmSize,
Sysctls: container.Sysctls,
CPUQuota: cpuQuota,
CPUShares: cpuShares,
CPUSet: cpuSet,
AuthConfig: authConfig,
OnSuccess: container.Constraints.Status.Match("success"),
OnFailure: (len(container.Constraints.Status.Include)+
len(container.Constraints.Status.Exclude) != 0) &&
container.Constraints.Status.Match("failure"),
NetworkMode: network_mode,
IpcMode: ipc_mode,
}
}
func isPlugin(c *yaml.Container) bool {
return len(c.Vargs) != 0
}
func isShell(c *yaml.Container) bool {
return len(c.Commands) != 0
}
func isService(c *yaml.Container) bool {
return c.Detached || (isPlugin(c) == false && isShell(c) == false)
}

@ -104,7 +104,7 @@ func WithWorkspaceFromURL(base, link string) Option {
path := "src"
parsed, err := url.Parse(link)
if err == nil {
path = filepath.Join(path, parsed.Host, parsed.Path)
path = filepath.Join(path, parsed.Hostname(), parsed.Path)
}
return WithWorkspace(base, path)
}
@ -145,6 +145,34 @@ func WithEnviron(env map[string]string) Option {
}
}
// WithCacher configures the compiler with default cache settings,
// using the provided Cacher to generate restore and rebuild steps.
func WithCacher(cacher Cacher) Option {
	return func(c *Compiler) {
		c.cacher = cacher
	}
}
// WithVolumeCacher configures the compiler with default local volume
// caching enabled, storing cache archives under the given base path.
func WithVolumeCacher(base string) Option {
	return func(c *Compiler) {
		c.cacher = &volumeCacher{base: base}
	}
}
// WithS3Cacher configures the compiler with default amazon s3
// caching enabled, using the given credentials, region and bucket.
func WithS3Cacher(access, secret, region, bucket string) Option {
	cacher := &s3Cacher{
		access: access,
		secret: secret,
		region: region,
		bucket: bucket,
	}
	return func(c *Compiler) {
		c.cacher = cacher
	}
}
// WithProxy configures the compiler with HTTP_PROXY, HTTPS_PROXY,
// and NO_PROXY environment variables added by default to every
// container in the pipeline.
@ -169,6 +197,21 @@ func WithNetworks(networks ...string) Option {
}
}
// WithResourceLimit configures the compiler with default resource
// limits that are applied to each container in the pipeline. Zero
// values leave the corresponding per-container setting in effect.
func WithResourceLimit(swap, mem, shmsize, cpuQuota, cpuShares int64, cpuSet string) Option {
	limits := ResourceLimit{
		MemSwapLimit: swap,
		MemLimit:     mem,
		ShmSize:      shmsize,
		CPUQuota:     cpuQuota,
		CPUShares:    cpuShares,
		CPUSet:       cpuSet,
	}
	return func(c *Compiler) {
		c.reslimit = limits
	}
}
// TODO(bradrydzewski) consider an alternate approach to
// WithProxy where the proxy strings are passed directly
// to the function as named parameters.

@ -1 +1,5 @@
package compiler
// generateScriptWindows converts a list of commands to a build script
// for Windows containers.
//
// NOTE(review): stub — always returns the empty string, so the
// windows/amd64 branch in createProcess sets an empty CI_SCRIPT and
// Windows pipelines run no commands. TODO provide a real
// implementation mirroring generateScriptPosix.
func generateScriptWindows(commands []string) string {
return ""
}

@ -4,13 +4,14 @@ import (
"path/filepath"
"github.com/cncd/pipeline/pipeline/frontend"
libcompose "github.com/docker/libcompose/yaml"
"github.com/cncd/pipeline/pipeline/frontend/yaml/types"
libcompose "github.com/docker/libcompose/yaml"
)
type (
// Constraints defines a set of runtime constraints.
Constraints struct {
Ref Constraint
Repo Constraint
Instance Constraint
Platform Constraint
@ -43,6 +44,8 @@ func (c *Constraints) Match(metadata frontend.Metadata) bool {
c.Event.Match(metadata.Curr.Event) &&
c.Branch.Match(metadata.Curr.Commit.Branch) &&
c.Repo.Match(metadata.Repo.Name) &&
c.Ref.Match(metadata.Curr.Commit.Ref) &&
c.Instance.Match(metadata.Sys.Host) &&
c.Matrix.Match(metadata.Job.Matrix)
}

@ -32,6 +32,7 @@ type (
CPUShares libcompose.StringorInt `yaml:"cpu_shares,omitempty"`
Detached bool `yaml:"detach,omitempty"`
Devices []string `yaml:"devices,omitempty"`
Tmpfs []string `yaml:"tmpfs,omitempty"`
DNS libcompose.Stringorslice `yaml:"dns,omitempty"`
DNSSearch libcompose.Stringorslice `yaml:"dns_search,omitempty"`
Entrypoint libcompose.Command `yaml:"entrypoint,omitempty"`
@ -46,6 +47,7 @@ type (
MemSwappiness libcompose.MemStringorInt `yaml:"mem_swappiness,omitempty"`
Name string `yaml:"name,omitempty"`
NetworkMode string `yaml:"network_mode,omitempty"`
IpcMode string `yaml:"ipc_mode,omitempty"`
Networks libcompose.Networks `yaml:"networks,omitempty"`
Privileged bool `yaml:"privileged,omitempty"`
Pull bool `yaml:"pull,omitempty"`
@ -53,6 +55,7 @@ type (
Ulimits libcompose.Ulimits `yaml:"ulimits,omitempty"`
Volumes libcompose.Volumes `yaml:"volumes,omitempty"`
Secrets Secrets `yaml:"secrets,omitempty"`
Sysctls libcompose.SliceorMap `yaml:"sysctls,omitempty"`
Constraints Constraints `yaml:"when,omitempty"`
Vargs map[string]interface{} `yaml:",inline"`
}

@ -6,6 +6,12 @@ import (
"github.com/cncd/pipeline/pipeline/frontend/yaml"
)
// Yaml configuration sections. The linter applies different rules
// depending on which section a container was declared in (for
// example, entrypoint checks are skipped for services).
const (
blockClone uint8 = iota
blockPipeline
blockServices
)
// A Linter lints a pipeline configuration.
type Linter struct {
trusted bool
@ -22,10 +28,22 @@ func New(opts ...Option) *Linter {
// Lint lints the configuration.
func (l *Linter) Lint(c *yaml.Config) error {
var containers []*yaml.Container
containers = append(containers, c.Pipeline.Containers...)
containers = append(containers, c.Services.Containers...)
if len(c.Pipeline.Containers) == 0 {
return fmt.Errorf("Invalid or missing pipeline section")
}
if err := l.lint(c.Clone.Containers, blockClone); err != nil {
return err
}
if err := l.lint(c.Pipeline.Containers, blockPipeline); err != nil {
return err
}
if err := l.lint(c.Services.Containers, blockServices); err != nil {
return err
}
return nil
}
func (l *Linter) lint(containers []*yaml.Container, block uint8) error {
for _, container := range containers {
if err := l.lintImage(container); err != nil {
return err
@ -35,15 +53,14 @@ func (l *Linter) Lint(c *yaml.Config) error {
return err
}
}
if isService(container) == false {
if block != blockServices && !container.Detached {
if err := l.lintEntrypoint(container); err != nil {
return err
}
}
}
if len(c.Pipeline.Containers) == 0 {
return fmt.Errorf("Invalid or missing pipeline section")
if err := l.lintCommands(container); err != nil {
return err
}
}
return nil
}
@ -55,6 +72,26 @@ func (l *Linter) lintImage(c *yaml.Container) error {
return nil
}
// lintCommands verifies that a container defining shell commands does
// not also define conflicting execution settings: plugin-style custom
// attributes (Vargs), an entrypoint, or a command. Commands are run by
// generating a script that overrides entrypoint/command, so combining
// them would silently discard the user's settings.
func (l *Linter) lintCommands(c *yaml.Container) error {
if len(c.Commands) == 0 {
return nil
}
if len(c.Vargs) != 0 {
// collect the offending attribute names for the error message.
// NOTE(review): map iteration order is random, so the key order
// in the message is nondeterministic across runs.
var keys []string
for key := range c.Vargs {
keys = append(keys, key)
}
return fmt.Errorf("Cannot configure both commands and custom attributes %v", keys)
}
if len(c.Entrypoint) != 0 {
return fmt.Errorf("Cannot configure both commands and entrypoint attributes")
}
if len(c.Command) != 0 {
return fmt.Errorf("Cannot configure both commands and command attributes")
}
return nil
}
func (l *Linter) lintEntrypoint(c *yaml.Container) error {
if len(c.Entrypoint) != 0 {
return fmt.Errorf("Cannot override container entrypoint")
@ -87,23 +124,20 @@ func (l *Linter) lintTrusted(c *yaml.Container) error {
if len(c.NetworkMode) != 0 {
return fmt.Errorf("Insufficient privileges to use network_mode")
}
if len(c.IpcMode) != 0 {
return fmt.Errorf("Insufficient privileges to use ipc_mode")
}
if len(c.Sysctls) != 0 {
return fmt.Errorf("Insufficient privileges to use sysctls")
}
if c.Networks.Networks != nil && len(c.Networks.Networks) != 0 {
return fmt.Errorf("Insufficient privileges to use networks")
}
if c.Volumes.Volumes != nil && len(c.Volumes.Volumes) != 0 {
return fmt.Errorf("Insufficient privileges to use volumes")
}
if len(c.Tmpfs) != 0 {
return fmt.Errorf("Insufficient privileges to use tmpfs")
}
return nil
}
func isService(c *yaml.Container) bool {
return !isScript(c) && !isPlugin(c)
}
func isScript(c *yaml.Container) bool {
return len(c.Commands) != 0
}
func isPlugin(c *yaml.Container) bool {
return len(c.Vargs) != 0
}

52
vendor/vendor.json vendored

@ -19,62 +19,62 @@
{
"checksumSHA1": "W3AuK8ocqHwlUajGmQLFvnRhTZE=",
"path": "github.com/cncd/pipeline/pipeline",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080",
"revisionTime": "2017-06-03T15:27:13Z"
"revision": "3a09486affc9215ba52f55b1f6e10182458d1aba",
"revisionTime": "2018-01-10T21:28:38Z"
},
{
"checksumSHA1": "rO+djTfB4LrT+FBbpotyUUobOtU=",
"checksumSHA1": "iRKdpheRPBTP0DKTQH7zmE2PI34=",
"path": "github.com/cncd/pipeline/pipeline/backend",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080",
"revisionTime": "2017-06-03T15:27:13Z"
"revision": "3a09486affc9215ba52f55b1f6e10182458d1aba",
"revisionTime": "2018-01-10T21:28:38Z"
},
{
"checksumSHA1": "gLqopO27JUHpxbV+jxggCMzqROY=",
"checksumSHA1": "EHJGG1USUliP8nzNWV/axO5KLzw=",
"path": "github.com/cncd/pipeline/pipeline/backend/docker",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080",
"revisionTime": "2017-06-03T15:27:13Z"
"revision": "3a09486affc9215ba52f55b1f6e10182458d1aba",
"revisionTime": "2018-01-10T21:28:38Z"
},
{
"checksumSHA1": "8Hj/OZnYZyz5N2hqENCTTaGtkNQ=",
"checksumSHA1": "HWV2BBLXS4gY5eLJeNIg7Z6nAOA=",
"path": "github.com/cncd/pipeline/pipeline/frontend",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080",
"revisionTime": "2017-06-03T15:27:13Z"
"revision": "3a09486affc9215ba52f55b1f6e10182458d1aba",
"revisionTime": "2018-01-10T21:28:38Z"
},
{
"checksumSHA1": "9KYIsY8WlWbrRAP7caEpWT70P9c=",
"checksumSHA1": "ncGH2MfHDtM7/dNzj2i+lnXFnf4=",
"path": "github.com/cncd/pipeline/pipeline/frontend/yaml",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080",
"revisionTime": "2017-06-03T15:27:13Z"
"revision": "3a09486affc9215ba52f55b1f6e10182458d1aba",
"revisionTime": "2018-01-10T21:28:38Z"
},
{
"checksumSHA1": "Pyldit0XriIzyFUmnvjPrghJLzw=",
"checksumSHA1": "cdjOSSSS5Gzx7gRLNvObQvNJWYg=",
"path": "github.com/cncd/pipeline/pipeline/frontend/yaml/compiler",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080",
"revisionTime": "2017-06-03T15:27:13Z"
"revision": "3a09486affc9215ba52f55b1f6e10182458d1aba",
"revisionTime": "2018-01-10T21:28:38Z"
},
{
"checksumSHA1": "Q0GkNUFamVYIA1Fd8r0A5M6Gx54=",
"checksumSHA1": "Sj2VYU+asWToYriIqcinav5MJZo=",
"path": "github.com/cncd/pipeline/pipeline/frontend/yaml/linter",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080",
"revisionTime": "2017-06-03T15:27:13Z"
"revision": "3a09486affc9215ba52f55b1f6e10182458d1aba",
"revisionTime": "2018-01-10T21:28:38Z"
},
{
"checksumSHA1": "L7Q5qJmPITNmvFEEaj5MPwCWFRk=",
"path": "github.com/cncd/pipeline/pipeline/frontend/yaml/types",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080",
"revisionTime": "2017-06-03T15:27:13Z"
"revision": "3a09486affc9215ba52f55b1f6e10182458d1aba",
"revisionTime": "2018-01-10T21:28:38Z"
},
{
"checksumSHA1": "2/3f3oNmxXy5kcrRLCFa24Oc9O4=",
"path": "github.com/cncd/pipeline/pipeline/interrupt",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080",
"revisionTime": "2017-06-03T15:27:13Z"
"revision": "3a09486affc9215ba52f55b1f6e10182458d1aba",
"revisionTime": "2018-01-10T21:28:38Z"
},
{
"checksumSHA1": "uOjTfke7Qxosrivgz/nVTHeIP5g=",
"path": "github.com/cncd/pipeline/pipeline/multipart",
"revision": "29b52c5e09e461941f262919903e2cc5a49de080",
"revisionTime": "2017-06-03T15:27:13Z"
"revision": "3a09486affc9215ba52f55b1f6e10182458d1aba",
"revisionTime": "2018-01-10T21:28:38Z"
},
{
"checksumSHA1": "rSxOx+SnSLAxR4ST8fSz9hhJLdk=",