diff --git a/go.mod b/go.mod index 1e837928b8f4..e37713a28767 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,8 @@ module github.com/moby/buildkit go 1.22.0 -// FIXME(thaJeztah): testing https://github.com/moby/moby/pull/49087 (split-idtools-internal branch) -replace github.com/docker/docker => github.com/dmcgowan/docker v1.1.3-0.20241223115939-cb7934ca2190 +// FIXME(thaJeztah): testing https://github.com/moby/moby/pull/49117 (archive-remove-pools branch) +replace github.com/docker/docker => github.com/dmcgowan/docker v1.1.3-0.20241225060105-be4eac753f17 replace github.com/docker/cli => github.com/docker/cli v27.0.2-0.20241223130549-3b49deb8fcce+incompatible diff --git a/go.sum b/go.sum index a72db85c92a9..f0cd79a908ae 100644 --- a/go.sum +++ b/go.sum @@ -138,8 +138,8 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/dmcgowan/docker v1.1.3-0.20241223115939-cb7934ca2190 h1:R75noWP/4Jlxmm2cul3siHPUJIRxeDLB6o6pC2OyqBA= -github.com/dmcgowan/docker v1.1.3-0.20241223115939-cb7934ca2190/go.mod h1:AKi7Z6FzccBGsB9PP0AbY4O0g9QR4Y5RmrVhRbN4r3Q= +github.com/dmcgowan/docker v1.1.3-0.20241225060105-be4eac753f17 h1:nz0I0IMSnkvrM+j1cWRUte9tMlGWjfclv5o2M1jgW6U= +github.com/dmcgowan/docker v1.1.3-0.20241225060105-be4eac753f17/go.mod h1:AKi7Z6FzccBGsB9PP0AbY4O0g9QR4Y5RmrVhRbN4r3Q= github.com/docker/cli v27.0.2-0.20241223130549-3b49deb8fcce+incompatible h1:RME8KUlj0XJHOTFGV8iEYVjUtoqjn0IPDe8l7ytkBPg= github.com/docker/cli v27.0.2-0.20241223130549-3b49deb8fcce+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index 365e8f18ed50..b7eae21328b4 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -19,13 +19,13 @@ import ( "runtime/debug" "strconv" "strings" + "sync" "sync/atomic" "syscall" "time" "github.com/containerd/log" "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" "github.com/klauspost/compress/zstd" "github.com/moby/patternmatcher" "github.com/moby/sys/sequential" @@ -230,13 +230,51 @@ func (r *readCloserWrapper) Close() error { return nil } - return r.closer() + if r.closer != nil { + return r.closer() + } + return nil +} + +var ( + bufioReader32KPool = &sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) }, + } +) + +type bufferedReader struct { + buf *bufio.Reader +} + +func newBufferedReader(r io.Reader) *bufferedReader { + buf := bufioReader32KPool.Get().(*bufio.Reader) + buf.Reset(r) + return &bufferedReader{buf} +} + +func (r *bufferedReader) Read(p []byte) (n int, err error) { + if r.buf == nil { + return 0, io.EOF + } + n, err = r.buf.Read(p) + if err == io.EOF { + r.buf.Reset(nil) + bufioReader32KPool.Put(r.buf) + r.buf = nil + } + return +} + +func (r *bufferedReader) Peek(n int) ([]byte, error) { + if r.buf == nil { + return nil, io.EOF + } + return r.buf.Peek(n) } // DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. 
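A note on the pooled reader introduced above: archive.go now keeps its own sync.Pool of 32 KiB bufio.Readers and hands each one back to the pool as soon as the wrapped stream reports io.EOF, which is what allows the pkg/pools dependency to go away. The following is a minimal standalone sketch of that pattern, using hypothetical names (readerPool, pooledReader) rather than the vendored ones.

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
	"sync"
)

// readerPool hands out 32 KiB bufio.Readers so repeated calls don't
// allocate a fresh buffer every time.
var readerPool = sync.Pool{
	New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
}

// pooledReader wraps a pooled bufio.Reader and returns it to the pool
// as soon as the underlying stream is exhausted.
type pooledReader struct {
	buf *bufio.Reader
}

func newPooledReader(r io.Reader) *pooledReader {
	buf := readerPool.Get().(*bufio.Reader)
	buf.Reset(r)
	return &pooledReader{buf: buf}
}

func (r *pooledReader) Read(p []byte) (int, error) {
	if r.buf == nil {
		return 0, io.EOF // already recycled
	}
	n, err := r.buf.Read(p)
	if err == io.EOF {
		r.buf.Reset(nil) // drop the reference to the source
		readerPool.Put(r.buf)
		r.buf = nil
	}
	return n, err
}

func main() {
	r := newPooledReader(strings.NewReader("hello"))
	out, _ := io.ReadAll(r)
	fmt.Println(string(out)) // hello
}
```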
func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - p := pools.BufioReader32KPool - buf := p.Get(archive) + buf := newBufferedReader(archive) bs, err := buf.Peek(10) if err != nil && err != io.EOF { // Note: we'll ignore any io.EOF error because there are some odd @@ -248,26 +286,12 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { return nil, err } - wrapReader := func(r io.Reader, cancel context.CancelFunc) io.ReadCloser { - return &readCloserWrapper{ - Reader: r, - closer: func() error { - if cancel != nil { - cancel() - } - if readCloser, ok := r.(io.ReadCloser); ok { - readCloser.Close() - } - p.Put(buf) - return nil - }, - } - } - compression := DetectCompression(bs) switch compression { case Uncompressed: - return wrapReader(buf, nil), nil + return &readCloserWrapper{ + Reader: buf, + }, nil case Gzip: ctx, cancel := context.WithCancel(context.Background()) @@ -276,10 +300,18 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { cancel() return nil, err } - return wrapReader(gzReader, cancel), nil + return &readCloserWrapper{ + Reader: gzReader, + closer: func() error { + cancel() + return gzReader.Close() + }, + }, nil case Bzip2: bz2Reader := bzip2.NewReader(buf) - return wrapReader(bz2Reader, nil), nil + return &readCloserWrapper{ + Reader: bz2Reader, + }, nil case Xz: ctx, cancel := context.WithCancel(context.Background()) @@ -288,30 +320,44 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { cancel() return nil, err } - return wrapReader(xzReader, cancel), nil + + return &readCloserWrapper{ + Reader: xzReader, + closer: func() error { + cancel() + return xzReader.Close() + }, + }, nil case Zstd: zstdReader, err := zstd.NewReader(buf) if err != nil { return nil, err } - return wrapReader(zstdReader, nil), nil + return &readCloserWrapper{ + Reader: zstdReader, + closer: func() error { + zstdReader.Close() + return nil + }, + }, nil default: return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) } } +type nopWriteCloser struct { + io.Writer +} + +func (nopWriteCloser) Close() error { return nil } + // CompressStream compresses the dest with specified compression algorithm. 
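Callers are not affected by the rewrite of DecompressStream: it still sniffs the compression format from the leading bytes and returns an io.ReadCloser whose Close releases the decompressor. A hedged usage sketch (the file name is a placeholder; any gzip/bzip2/xz/zstd/uncompressed tar stream works):

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// layer.tar.gz is a placeholder path; DecompressStream detects the
	// actual format itself.
	f, err := os.Open("layer.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	rc, err := archive.DecompressStream(f)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close() // releases the decompressor (and cancels its context where one is used)

	// Walk the decompressed tar entries.
	tr := tar.NewReader(rc)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(hdr.Name)
	}
}
```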
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) switch compression { case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil + return nopWriteCloser{dest}, nil case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil + return gzip.NewWriter(dest), nil case Bzip2, Xz: // archive/bzip2 does not support writing, and there is no xz support at all // However, this is not a problem as docker only currently generates gzipped tars @@ -382,7 +428,7 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi pipeWriter.CloseWithError(err) return } - if _, err := pools.Copy(tarWriter, tarReader); err != nil { + if _, err := copyWithBuffer(tarWriter, tarReader); err != nil { pipeWriter.CloseWithError(err) return } @@ -529,7 +575,6 @@ type tarWhiteoutConverter interface { type tarAppender struct { TarWriter *tar.Writer - Buffer *bufio.Writer // for hardlink mapping SeenFiles map[uint64]string @@ -547,7 +592,6 @@ func newTarAppender(idMapping idtools.IdentityMapping, writer io.Writer, chownOp return &tarAppender{ SeenFiles: make(map[uint64]string), TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), IdentityMapping: idMapping, ChownOpts: chownOpts, } @@ -665,17 +709,11 @@ func (ta *tarAppender) addTarFile(path, name string) error { return err } - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = pools.Copy(ta.Buffer, file) + _, err = copyWithBuffer(ta.TarWriter, file) file.Close() if err != nil { return err } - err = ta.Buffer.Flush() - if err != nil { - return err - } } return nil @@ -718,7 +756,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, o if err != nil { return err } - if _, err := pools.Copy(file, reader); err != nil { + if _, err := copyWithBuffer(file, reader); err != nil { file.Close() return err } @@ -929,9 +967,6 @@ func (t *Tarballer) Do() { } }() - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - // In general we log errors here but ignore them because // during e.g. a diff operation the container can continue // mutating the filesystem and we can see transient errors @@ -1087,8 +1122,6 @@ func (t *Tarballer) Do() { // Unpack unpacks the decompressedArchive to dest with options. 
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { tr := tar.NewReader(decompressedArchive) - trBuf := pools.BufioReader32KPool.Get(nil) - defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) @@ -1165,7 +1198,6 @@ loop: } } } - trBuf.Reset(tr) if err := remapIDs(options.IDMap, hdr); err != nil { return err @@ -1181,7 +1213,7 @@ loop: } } - if err := createTarFile(path, dest, hdr, trBuf, options); err != nil { + if err := createTarFile(path, dest, hdr, tr, options); err != nil { return err } @@ -1384,7 +1416,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { if err := tw.WriteHeader(hdr); err != nil { return err } - if _, err := pools.Copy(tw, srcF); err != nil { + if _, err := copyWithBuffer(tw, srcF); err != nil { return err } return nil diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go index 423605fbb784..79c810a6819b 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -15,7 +15,6 @@ import ( "github.com/containerd/log" "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" ) // ChangeType represents the change type. @@ -389,9 +388,6 @@ func ExportChanges(dir string, changes []Change, idMap idtools.IdentityMapping) go func() { ta := newTarAppender(idMap, writer, nil) - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - sort.Sort(changesByPath(changes)) // In general we log errors here but ignore them because diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go index ee4020c2a225..cddf18ecdb8b 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -8,6 +8,7 @@ import ( "os" "path/filepath" "strings" + "sync" "github.com/containerd/log" ) @@ -20,6 +21,17 @@ var ( ErrInvalidCopySource = errors.New("invalid copy source content") ) +var copyPool = sync.Pool{ + New: func() interface{} { s := make([]byte, 32*1024); return &s }, +} + +func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) { + buf := copyPool.Get().(*[]byte) + written, err = io.CopyBuffer(dst, src, *buf) + copyPool.Put(buf) + return +} + // PreserveTrailingDotOrSeparator returns the given cleaned path (after // processing using any utility functions from the path or filepath stdlib // packages) and appends a trailing `/.` or `/` if its corresponding original diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index 6a05643ab6e1..d5a394cdc953 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -11,7 +11,6 @@ import ( "strings" "github.com/containerd/log" - "github.com/docker/docker/pkg/pools" ) // UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be @@ -19,8 +18,6 @@ import ( // Returns the size in bytes of the contents of the layer. 
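The new copyWithBuffer helper in copy.go is what replaces pools.Copy across the package: io.CopyBuffer fed from a sync.Pool of 32 KiB slices. A self-contained sketch of the same idea follows (names are illustrative); the pool stores *[]byte rather than []byte so that putting a buffer back does not allocate when the slice header is boxed into an interface value.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

// bufPool hands out 32 KiB scratch buffers for io.CopyBuffer.
var bufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024)
		return &b
	},
}

// copyWithBuffer mirrors the helper added to pkg/archive: io.Copy
// semantics, but with a reused buffer instead of a per-call allocation.
func copyWithBuffer(dst io.Writer, src io.Reader) (int64, error) {
	buf := bufPool.Get().(*[]byte)
	written, err := io.CopyBuffer(dst, src, *buf)
	bufPool.Put(buf)
	return written, err
}

func main() {
	var out bytes.Buffer
	n, err := copyWithBuffer(&out, strings.NewReader("some tar bytes"))
	fmt.Println(n, err, out.String())
}
```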
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { tr := tar.NewReader(layer) - trBuf := pools.BufioReader32KPool.Get(tr) - defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header unpackedPaths := make(map[string]struct{}) @@ -159,8 +156,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, } } - trBuf.Reset(tr) - srcData := io.Reader(trBuf) + srcData := io.Reader(tr) srcHdr := hdr // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so diff --git a/vendor/github.com/docker/docker/pkg/idtools/const_windows.go b/vendor/github.com/docker/docker/pkg/idtools/const_windows.go deleted file mode 100644 index 0120bdf8e661..000000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/const_windows.go +++ /dev/null @@ -1,14 +0,0 @@ -package idtools - -const ( - // Deprecated: copy value locally - SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" -) - -const ( - // Deprecated: copy value locally - ContainerAdministratorSidString = "S-1-5-93-2-1" - - // Deprecated: copy value locally - ContainerUserSidString = "S-1-5-93-2-2" -) diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go index d2fbd943a656..82b325a2b72b 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -1,8 +1,11 @@ -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import ( + "bufio" "fmt" "os" + "strconv" + "strings" ) // IDMap contains a single entry for user namespace range remapping. An array @@ -14,6 +17,22 @@ type IDMap struct { Size int `json:"size"` } +type subIDRange struct { + Start int + Length int +} + +type subIDRanges []subIDRange + +func (e subIDRanges) Len() int { return len(e) } +func (e subIDRanges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e subIDRanges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName = "/etc/subuid" + subgidFileName = "/etc/subgid" +) + // MkdirAllAndChown creates a directory (include any along the path) and then modifies // ownership to the requested uid/gid. If the directory already exists, this // function will still change ownership and permissions. @@ -143,6 +162,67 @@ func (i IdentityMapping) Empty() bool { return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0 } +func createIDMap(subidRanges subIDRanges) []IDMap { + idMap := []IDMap{} + + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (subIDRanges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (subIDRanges, error) { + return parseSubidFile(subgidFileName, username) +} + +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found subIDRanges for a specified username. 
If the special value +// "ALL" is supplied for username, then all subIDRanges in the file will be returned +func parseSubidFile(path, username string) (subIDRanges, error) { + var rangeList subIDRanges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + + return rangeList, s.Err() +} + // CurrentIdentity returns the identity of the current process func CurrentIdentity() Identity { return Identity{UID: os.Getuid(), GID: os.Getegid()} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index 0f8c1dc86c0e..cd621bdcc2ae 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -1,11 +1,18 @@ //go:build !windows -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import ( + "bytes" + "fmt" + "io" "os" + "os/exec" "path/filepath" + "strconv" "syscall" + + "github.com/moby/sys/user" ) func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { @@ -65,6 +72,129 @@ func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting return nil } +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(name string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(name) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(name) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(strconv.Itoa(uid)) +} + +func getentUser(name string) (user.User, error) { + reader, err := callGetent("passwd", name) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) + } + return users[0], nil +} + +// LookupGroup uses traditional local 
system files lookup (from libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(name string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(name) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(name) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(strconv.Itoa(gid)) +} + +func getentGroup(name string) (user.Group, error) { + reader, err := callGetent("group", name) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) + } + return groups[0], nil +} + +func callGetent(database, key string) (io.Reader, error) { + getentCmd, err := resolveBinary("getent") + // if no `getent` command within the execution environment, can't do anything else + if err != nil { + return nil, fmt.Errorf("unable to find getent command: %w", err) + } + command := exec.Command(getentCmd, database, key) + // we run getent within container filesystem, but without /dev so /dev/null is not available for exec to mock stdin + command.Stdin = io.NopCloser(bytes.NewReader(nil)) + out, err := command.CombinedOutput() + if err != nil { + exitCode, errC := getExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + } + return bytes.NewReader(out), nil +} + +// getExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. +func getExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + // setPermissions performs a chown/chmod only if the uid/gid don't match what's requested // Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the // dir is on an NFS share, so don't call chown unless we absolutely must. 
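The lookup helpers above fall back to the getent binary whenever the local passwd/group files do not contain the requested entry. The sketch below isolates that fallback call; it assumes getent is on PATH and follows the same exit-code convention the vendored code checks (1: invalid parameters or unknown database, 2: key not found, 3: enumeration unsupported).

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os/exec"
)

// getentLookup queries the configured NSS databases (passwd, group, ...)
// for a single key and returns the raw colon-separated record(s).
func getentLookup(database, key string) (io.Reader, error) {
	cmd := exec.Command("getent", database, key)
	out, err := cmd.CombinedOutput()
	if err != nil {
		var exitErr *exec.ExitError
		if errors.As(err, &exitErr) {
			switch exitErr.ExitCode() {
			case 1:
				return nil, fmt.Errorf("getent: invalid parameters or unknown database %q", database)
			case 2:
				return nil, fmt.Errorf("getent: no entry %q in %s database", key, database)
			case 3:
				return nil, fmt.Errorf("getent: %s database does not support enumeration", database)
			}
		}
		return nil, err
	}
	return bytes.NewReader(out), nil
}

func main() {
	r, err := getentLookup("passwd", "root")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	record, _ := io.ReadAll(r)
	fmt.Print(string(record)) // e.g. root:x:0:0:root:/root:/bin/bash
}
```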
@@ -88,3 +218,61 @@ func setPermissions(p string, mode os.FileMode, owner Identity, stat os.FileInfo } return os.Chown(p, owner.UID, owner.GID) } + +// LoadIdentityMapping takes a requested username and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func LoadIdentityMapping(name string) (IdentityMapping, error) { + usr, err := LookupUser(name) + if err != nil { + return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %v", name, err) + } + + subuidRanges, err := lookupSubUIDRanges(usr) + if err != nil { + return IdentityMapping{}, err + } + subgidRanges, err := lookupSubGIDRanges(usr) + if err != nil { + return IdentityMapping{}, err + } + + return IdentityMapping{ + UIDMaps: subuidRanges, + GIDMaps: subgidRanges, + }, nil +} + +func lookupSubUIDRanges(usr user.User) ([]IDMap, error) { + rangeList, err := parseSubuid(strconv.Itoa(usr.Uid)) + if err != nil { + return nil, err + } + if len(rangeList) == 0 { + rangeList, err = parseSubuid(usr.Name) + if err != nil { + return nil, err + } + } + if len(rangeList) == 0 { + return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name) + } + return createIDMap(rangeList), nil +} + +func lookupSubGIDRanges(usr user.User) ([]IDMap, error) { + rangeList, err := parseSubgid(strconv.Itoa(usr.Uid)) + if err != nil { + return nil, err + } + if len(rangeList) == 0 { + rangeList, err = parseSubgid(usr.Name) + if err != nil { + return nil, err + } + } + if len(rangeList) == 0 { + return nil, fmt.Errorf("no subgid ranges found for user %q", usr.Name) + } + return createIDMap(rangeList), nil +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go index d98e7d721624..5b7c2ad771bc 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -1,9 +1,18 @@ -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import ( "os" ) +const ( + SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" +) + +const ( + ContainerAdministratorSidString = "S-1-5-93-2-1" + ContainerUserSidString = "S-1-5-93-2-2" +) + // This is currently a wrapper around [os.MkdirAll] since currently // permissions aren't set through this path, the identity isn't utilized. 
// Ownership is handled elsewhere, but in the future could be support here diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 000000000000..7fd6c413d451 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,166 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "fmt" + "os/exec" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) +) + +const ( + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. +func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := exec.Command("id", name).CombinedOutput() + if err != nil { + return -1, -1, fmt.Errorf("error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(name string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + var args []string + switch userCommand { + case "adduser": + args = []string{"--system", "--shell", "/bin/false", "--no-create-home", "--disabled-login", "--disabled-password", "--group", name} + case "useradd": + args = []string{"-r", "-s", "/bin/false", name} + default: + return fmt.Errorf("cannot add user; no useradd/adduser binary found") + } + + if out, err := exec.Command(userCommand, args...).CombinedOutput(); err != nil { + return fmt.Errorf("failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("error while looking for 
subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("can't find available subuid range: %v", err) + } + idRange := fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1) + out, err := exec.Command("usermod", "-v", idRange, name).CombinedOutput() + if err != nil { + return fmt.Errorf("unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("can't find available subgid range: %v", err) + } + idRange := fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1) + out, err := exec.Command("usermod", "-w", idRange, name).CombinedOutput() + if err != nil { + return fmt.Errorf("unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("couldn't parse all ranges in /etc/subgid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList subIDRanges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 000000000000..6a9311c4a750 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +//go:build !linux + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. 
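createSubordinateRanges above allocates a new 65536-wide block in /etc/subuid or /etc/subgid by walking the existing ranges in sorted order and bumping the candidate start past any overlap. A small self-contained sketch of that allocation step, with illustrative types and values:

```go
package main

import (
	"fmt"
	"sort"
)

type idRange struct{ start, length int }

const (
	rangeLen   = 65536  // width of a new subordinate range
	rangeStart = 100000 // first candidate start, matching the vendored defaults
)

// nextRangeStart scans existing ranges (sorted by start) and moves the
// candidate past every range it would collide with.
func nextRangeStart(existing []idRange) int {
	sort.Slice(existing, func(i, j int) bool { return existing[i].start < existing[j].start })
	start := rangeStart
	for _, r := range existing {
		if overlaps(r, start) {
			start = r.start + r.length
		}
	}
	return start
}

// overlaps reports whether a new range beginning at start would touch r.
func overlaps(r idRange, start int) bool {
	low, high := start, start+rangeLen
	return (low >= r.start && low <= r.start+r.length) ||
		(high >= r.start && high <= r.start+r.length)
}

func main() {
	taken := []idRange{{100000, 65536}, {165536, 65536}}
	fmt.Println(nextRangeStart(taken)) // 231072
}
```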
+func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go new file mode 100644 index 000000000000..517a2f52ca2f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -0,0 +1,26 @@ +//go:build !windows + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "fmt" + "os/exec" + "path/filepath" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + // only return no error if the final resolved binary basename + // matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go deleted file mode 100644 index 3ea3012b188b..000000000000 --- a/vendor/github.com/docker/docker/pkg/pools/pools.go +++ /dev/null @@ -1,137 +0,0 @@ -// Package pools provides a collection of pools which provide various -// data types with buffers. These can be used to lower the number of -// memory allocations and reuse buffers. -// -// New pools should be added to this package to allow them to be -// shared across packages. -// -// Utility functions which operate on pools should be added to this -// package to allow them to be reused. -package pools // import "github.com/docker/docker/pkg/pools" - -import ( - "bufio" - "io" - "sync" - - "github.com/docker/docker/pkg/ioutils" -) - -const buffer32K = 32 * 1024 - -var ( - // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) - buffer32KPool = newBufferPoolWithSize(buffer32K) -) - -// BufioReaderPool is a bufio reader that uses sync.Pool. -type BufioReaderPool struct { - pool sync.Pool -} - -// newBufioReaderPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - return &BufioReaderPool{ - pool: sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, size) }, - }, - } -} - -// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - buf := bufPool.pool.Get().(*bufio.Reader) - buf.Reset(r) - return buf -} - -// Put puts the bufio.Reader back into the pool. -func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -type bufferPool struct { - pool sync.Pool -} - -func newBufferPoolWithSize(size int) *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() interface{} { s := make([]byte, size); return &s }, - }, - } -} - -func (bp *bufferPool) Get() *[]byte { - return bp.pool.Get().(*[]byte) -} - -func (bp *bufferPool) Put(b *[]byte) { - bp.pool.Put(b) -} - -// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. 
-func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := buffer32KPool.Get() - written, err = io.CopyBuffer(dst, src, *buf) - buffer32KPool.Put(buf) - return -} - -// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back -// into the pool and closes the reader if it's an io.ReadCloser. -func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - readCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} - -// BufioWriterPool is a bufio writer that uses sync.Pool. -type BufioWriterPool struct { - pool sync.Pool -} - -// newBufioWriterPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - return &BufioWriterPool{ - pool: sync.Pool{ - New: func() interface{} { return bufio.NewWriterSize(nil, size) }, - }, - } -} - -// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - buf := bufPool.pool.Get().(*bufio.Writer) - buf.Reset(w) - return buf -} - -// Put puts the bufio.Writer back into the pool. -func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back -// into the pool and closes the writer if it's an io.WriteCloser. -func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - writeCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} diff --git a/vendor/modules.txt b/vendor/modules.txt index df72065e43e3..25221d758517 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -466,7 +466,7 @@ github.com/docker/cli/cli/config/configfile github.com/docker/cli/cli/config/credentials github.com/docker/cli/cli/config/types github.com/docker/cli/cli/connhelper/commandconn -# github.com/docker/docker v27.4.0+incompatible => github.com/dmcgowan/docker v1.1.3-0.20241223115939-cb7934ca2190 +# github.com/docker/docker v27.4.0+incompatible => github.com/dmcgowan/docker v1.1.3-0.20241225060105-be4eac753f17 ## explicit github.com/docker/docker/api github.com/docker/docker/api/types @@ -498,7 +498,6 @@ github.com/docker/docker/pkg/archive github.com/docker/docker/pkg/chrootarchive github.com/docker/docker/pkg/idtools github.com/docker/docker/pkg/ioutils -github.com/docker/docker/pkg/pools github.com/docker/docker/profiles/seccomp # github.com/docker/docker-credential-helpers v0.8.2 ## explicit; go 1.19 @@ -1104,5 +1103,5 @@ kernel.org/pub/linux/libs/security/libcap/cap # kernel.org/pub/linux/libs/security/libcap/psx v1.2.70 ## explicit; go 1.11 kernel.org/pub/linux/libs/security/libcap/psx -# github.com/docker/docker => github.com/dmcgowan/docker v1.1.3-0.20241223115939-cb7934ca2190 +# github.com/docker/docker => github.com/dmcgowan/docker v1.1.3-0.20241225060105-be4eac753f17 # github.com/docker/cli => github.com/docker/cli v27.0.2-0.20241223130549-3b49deb8fcce+incompatible
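Taken together, CompressStream now returns the gzip writer (or a no-op closer for Uncompressed) directly and DecompressStream relies on the local pooled reader, so the public behaviour of the pair is unchanged even though pkg/pools is gone. A hedged round-trip check using the vendored package:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	payload := []byte("round-trip through the archive compression helpers")

	// Compress into an in-memory buffer using gzip.
	var compressed bytes.Buffer
	wc, err := archive.CompressStream(&compressed, archive.Gzip)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := wc.Write(payload); err != nil {
		log.Fatal(err)
	}
	if err := wc.Close(); err != nil { // flushes the gzip footer
		log.Fatal(err)
	}

	// DecompressStream detects gzip from the magic bytes and undoes it.
	rc, err := archive.DecompressStream(&compressed)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	roundTripped, err := io.ReadAll(rc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(payload, roundTripped)) // true
}
```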