Switch from pkg/errors to Go 1.13+ error wrapping

The github.com/pkg/errors package is mostly obsolete since Go 1.13
introduced %w-style error wrapping. It is also no longer maintained,
and its repository has been archived by the owner.

Some part of this change was done manually, and some changes were done
by using github.com/AkihiroSuda/go-wrap-to-percent-w tool.

In a few places this also:
 - changes '%s' or \"%s\" to %q;
 - removes extra context from the error message (for example, errors from
   os functions dealing with files already contain the file name, and
   strconv.Atoi errors already contain the string that failed to
   parse).

Note that there is a single place which uses the StackTrace functionality
of pkg/errors; that usage is removed by this commit.

A few remaining users of pkg/errors vendored here (directly and
indirectly) are:
 - github.com/containerd/go-runc (needs to be bumped to v1.1.0);
 - github.com/microsoft/didx509go (needs https://github.com/microsoft/didx509go/pull/19);
 - github.com/docker/cli (needs https://github.com/docker/cli/issues/3618 fixed);
 - github.com/docker/docker (?)
 - github.com/linuxkit/virtsock (needs https://github.com/linuxkit/virtsock/pull/69 merged);

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
This commit is contained in:
Kir Kolyshkin 2024-10-03 18:18:47 -07:00
Родитель 3bd12f1e6a
Коммит 1cabea0a21
152 изменённых файлов: 887 добавлений и 940 удалений

Просмотреть файл

@ -4,10 +4,10 @@ package main
import (
"context"
"fmt"
task "github.com/containerd/containerd/api/runtime/task/v2"
"github.com/containerd/errdefs"
"github.com/pkg/errors"
)
type shimExecState string
@ -86,11 +86,6 @@ type shimExec interface {
}
func newExecInvalidStateError(tid, eid string, state shimExecState, op string) error {
return errors.Wrapf(
errdefs.ErrFailedPrecondition,
"exec: '%s' in task: '%s' is in invalid state: '%s' for %s",
eid,
tid,
state,
op)
return fmt.Errorf("exec: %q in task: %q is in invalid state: %q for %s: %w",
eid, tid, state, op, errdefs.ErrFailedPrecondition)
}

Просмотреть файл

@ -4,6 +4,7 @@ package main
import (
"context"
"fmt"
"sync"
"time"
@ -13,7 +14,6 @@ import (
"github.com/containerd/containerd/runtime"
"github.com/containerd/errdefs"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/protobuf/types/known/timestamppb"
@ -42,7 +42,8 @@ func newHcsExec(
id, bundle string,
isWCOW bool,
spec *specs.Process,
io cmd.UpstreamIO) shimExec {
io cmd.UpstreamIO,
) shimExec {
log.G(ctx).WithFields(logrus.Fields{
"tid": tid,
"eid": id, // Init exec ID is always same as Task ID
@ -287,7 +288,7 @@ func (he *hcsExec) Kill(ctx context.Context, signal uint32) error {
}
}
if err != nil {
return errors.Wrapf(errdefs.ErrFailedPrecondition, "signal %d: %v", signal, err)
return fmt.Errorf("signal %d: %w: %w", signal, err, errdefs.ErrFailedPrecondition)
}
var delivered bool
if supported && options != nil {
@ -331,11 +332,11 @@ func (he *hcsExec) Kill(ctx context.Context, signal uint32) error {
return err
}
if !delivered {
return errors.Wrapf(errdefs.ErrNotFound, "exec: '%s' in task: '%s' not found", he.id, he.tid)
return fmt.Errorf("exec: %q in task: %q: %w", he.id, he.tid, errdefs.ErrNotFound)
}
return nil
case shimExecStateExited:
return errors.Wrapf(errdefs.ErrNotFound, "exec: '%s' in task: '%s' not found", he.id, he.tid)
return fmt.Errorf("exec: %q in task: %q: %w", he.id, he.tid, errdefs.ErrNotFound)
default:
return newExecInvalidStateError(he.tid, he.id, he.state, "kill")
}
@ -345,7 +346,7 @@ func (he *hcsExec) ResizePty(ctx context.Context, width, height uint32) error {
he.sl.Lock()
defer he.sl.Unlock()
if !he.io.Terminal() {
return errors.Wrapf(errdefs.ErrFailedPrecondition, "exec: '%s' in task: '%s' is not a tty", he.id, he.tid)
return fmt.Errorf("exec: %q in task: %q is not a tty: %w", he.id, he.tid, errdefs.ErrFailedPrecondition)
}
if he.state == shimExecStateRunning {

Просмотреть файл

@ -4,6 +4,7 @@ package main
import (
"context"
"fmt"
"sync"
"time"
@ -13,7 +14,6 @@ import (
containerd_v1_types "github.com/containerd/containerd/api/types/task"
"github.com/containerd/containerd/runtime"
"github.com/containerd/errdefs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/protobuf/types/known/timestamppb"
)
@ -166,7 +166,7 @@ func (wpse *wcowPodSandboxExec) Kill(ctx context.Context, signal uint32) error {
close(wpse.exited)
return nil
case shimExecStateExited:
return errors.Wrapf(errdefs.ErrNotFound, "exec: '%s' in task: '%s' not found", wpse.tid, wpse.tid)
return fmt.Errorf("exec: %q in task: %q: %w", wpse.tid, wpse.tid, errdefs.ErrNotFound)
default:
return newExecInvalidStateError(wpse.tid, wpse.tid, wpse.state, "kill")
}
@ -177,7 +177,7 @@ func (wpse *wcowPodSandboxExec) ResizePty(ctx context.Context, width, height uin
defer wpse.sl.Unlock()
// We will never have IO for a sandbox container so we wont have a tty
// either.
return errors.Wrapf(errdefs.ErrFailedPrecondition, "exec: '%s' in task: '%s' is not a tty", wpse.tid, wpse.tid)
return fmt.Errorf("exec: %q in task: %q is not a tty: %w", wpse.tid, wpse.tid, errdefs.ErrFailedPrecondition)
}
func (wpse *wcowPodSandboxExec) CloseIO(ctx context.Context, stdin bool) error {

Просмотреть файл

@ -4,6 +4,7 @@ package main
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
@ -21,7 +22,6 @@ import (
"github.com/containerd/containerd/runtime"
"github.com/containerd/errdefs"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
@ -73,7 +73,7 @@ func createPod(ctx context.Context, events publisher, req *task.CreateTaskReques
log.G(ctx).WithField("tid", req.ID).Debug("createPod")
if osversion.Build() < osversion.RS5 {
return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "pod support is not available on Windows versions previous to RS5 (%d)", osversion.RS5)
return nil, fmt.Errorf("pod support is not available on Windows versions previous to RS5 (%d): %w", osversion.RS5, errdefs.ErrFailedPrecondition)
}
ct, sid, err := oci.GetSandboxTypeAndID(s.Annotations)
@ -81,20 +81,20 @@ func createPod(ctx context.Context, events publisher, req *task.CreateTaskReques
return nil, err
}
if ct != oci.KubernetesContainerTypeSandbox {
return nil, errors.Wrapf(
errdefs.ErrFailedPrecondition,
"expected annotation: '%s': '%s' got '%s'",
return nil, fmt.Errorf(
"expected annotation: %q: %q, got %q: %w",
annotations.KubernetesContainerType,
oci.KubernetesContainerTypeSandbox,
ct)
ct,
errdefs.ErrFailedPrecondition)
}
if sid != req.ID {
return nil, errors.Wrapf(
errdefs.ErrFailedPrecondition,
"expected annotation '%s': '%s' got '%s'",
return nil, fmt.Errorf(
"expected annotation %q: %q, got %q: %w",
annotations.KubernetesSandboxID,
req.ID,
sid)
sid,
errdefs.ErrFailedPrecondition)
}
owner := filepath.Base(os.Args[0])
@ -168,7 +168,7 @@ func createPod(ctx context.Context, events publisher, req *task.CreateTaskReques
p.jobContainer = true
return &p, nil
} else if !isWCOW {
return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "oci spec does not contain WCOW or LCOW spec")
return nil, fmt.Errorf("oci spec does not contain WCOW or LCOW spec: %w", errdefs.ErrFailedPrecondition)
}
defer func() {
@ -208,7 +208,7 @@ func createPod(ctx context.Context, events publisher, req *task.CreateTaskReques
if nsid != "" {
if err := parent.ConfigureNetworking(ctx, nsid); err != nil {
return nil, errors.Wrapf(err, "failed to setup networking for pod %q", req.ID)
return nil, fmt.Errorf("failed to setup networking for pod %q: %w", req.ID, err)
}
}
p.sandboxTask = newWcowPodSandboxTask(ctx, events, req.ID, req.Bundle, parent, nsid)
@ -297,16 +297,16 @@ func (p *pod) ID() string {
func (p *pod) CreateTask(ctx context.Context, req *task.CreateTaskRequest, s *specs.Spec) (_ shimTask, err error) {
if req.ID == p.id {
return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "task with id: '%s' already exists", req.ID)
return nil, fmt.Errorf("task with id: %q: %w", req.ID, errdefs.ErrAlreadyExists)
}
e, _ := p.sandboxTask.GetExec("")
if e.State() != shimExecStateRunning {
return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "task with id: '%s' cannot be created in pod: '%s' which is not running", req.ID, p.id)
return nil, fmt.Errorf("task with id: %q cannot be created in pod: %q which is not running: %w", req.ID, p.id, errdefs.ErrFailedPrecondition)
}
_, ok := p.workloadTasks.Load(req.ID)
if ok {
return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "task with id: '%s' already exists id pod: '%s'", req.ID, p.id)
return nil, fmt.Errorf("task with id: %q already exists in pod: %q: %w", req.ID, p.id, errdefs.ErrAlreadyExists)
}
if p.jobContainer {
@ -334,20 +334,20 @@ func (p *pod) CreateTask(ctx context.Context, req *task.CreateTaskRequest, s *sp
return nil, err
}
if ct != oci.KubernetesContainerTypeContainer {
return nil, errors.Wrapf(
errdefs.ErrFailedPrecondition,
"expected annotation: '%s': '%s' got '%s'",
return nil, fmt.Errorf(
"expected annotation: %q: %q, got %q: %w",
annotations.KubernetesContainerType,
oci.KubernetesContainerTypeContainer,
ct)
ct,
errdefs.ErrFailedPrecondition)
}
if sid != p.id {
return nil, errors.Wrapf(
errdefs.ErrFailedPrecondition,
"expected annotation '%s': '%s' got '%s'",
return nil, fmt.Errorf(
"expected annotation %q: %q, got %q: %w",
annotations.KubernetesSandboxID,
p.id,
sid)
sid,
errdefs.ErrFailedPrecondition)
}
st, err := newHcsTask(ctx, p.events, p.host, false, req, s)
@ -365,7 +365,7 @@ func (p *pod) GetTask(tid string) (shimTask, error) {
}
raw, loaded := p.workloadTasks.Load(tid)
if !loaded {
return nil, errors.Wrapf(errdefs.ErrNotFound, "task with id: '%s' not found", tid)
return nil, fmt.Errorf("task with id: %q: %w", tid, errdefs.ErrNotFound)
}
return raw.(shimTask), nil
}
@ -395,7 +395,7 @@ func (p *pod) KillTask(ctx context.Context, tid, eid string, signal uint32, all
return err
}
if all && eid != "" {
return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot signal all with non empty ExecID: '%s'", eid)
return fmt.Errorf("cannot signal all with non empty ExecID: %q: %w", eid, errdefs.ErrFailedPrecondition)
}
eg := errgroup.Group{}
if all && tid == p.id {
@ -426,15 +426,15 @@ func (p *pod) DeleteTask(ctx context.Context, tid string) error {
t, err := p.GetTask(tid)
if err != nil {
return errors.Wrap(err, "could not find task to delete")
return fmt.Errorf("could not find task to delete: %w", err)
}
e, err := t.GetExec("")
if err != nil {
return errors.Wrap(err, "could not get initial exec")
return fmt.Errorf("could not get initial exec: %w", err)
}
if e.State() == shimExecStateRunning {
return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot delete task with running exec")
return fmt.Errorf("cannot delete task with running exec: %w", errdefs.ErrFailedPrecondition)
}
if p.id != tid {

Просмотреть файл

@ -4,6 +4,7 @@ package main
import (
"context"
"errors"
"fmt"
"io"
"net"
@ -16,7 +17,6 @@ import (
task "github.com/containerd/containerd/api/runtime/task/v2"
"github.com/containerd/ttrpc"
typeurl "github.com/containerd/typeurl/v2"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"golang.org/x/sys/windows"
@ -79,7 +79,7 @@ var serveCommand = cli.Command{
// containerd passes the shim options protobuf via stdin.
newShimOpts, err := readOptions(os.Stdin)
if err != nil {
return errors.Wrap(err, "failed to read shim options from stdin")
return fmt.Errorf("failed to read shim options from stdin: %w", err)
} else if newShimOpts != nil {
// We received a valid shim options struct.
shimOpts = newShimOpts
@ -100,7 +100,7 @@ var serveCommand = cli.Command{
if shimOpts.LogLevel != "" {
lvl, err := logrus.ParseLevel(shimOpts.LogLevel)
if err != nil {
return errors.Wrapf(err, "failed to parse shim log level %q", shimOpts.LogLevel)
return fmt.Errorf("failed to parse shim log level %q: %w", shimOpts.LogLevel, err)
}
logrus.SetLevel(lvl)
}
@ -274,16 +274,16 @@ func trapClosedConnErr(err error) error {
func readOptions(r io.Reader) (*runhcsopts.Options, error) {
d, err := io.ReadAll(r)
if err != nil {
return nil, errors.Wrap(err, "failed to read input")
return nil, fmt.Errorf("failed to read input: %w", err)
}
if len(d) > 0 {
var a anypb.Any
if err := proto.Unmarshal(d, &a); err != nil {
return nil, errors.Wrap(err, "failed unmarshalling into Any")
return nil, fmt.Errorf("failed unmarshalling into Any: %w", err)
}
v, err := typeurl.UnmarshalAny(&a)
if err != nil {
return nil, errors.Wrap(err, "failed unmarshalling by typeurl")
return nil, fmt.Errorf("failed unmarshalling by typeurl: %w", err)
}
return v.(*runhcsopts.Options), nil
}
@ -296,7 +296,7 @@ func createEvent(event string) (windows.Handle, error) {
ev, _ := windows.UTF16PtrFromString(event)
sd, err := windows.SecurityDescriptorFromString("D:P(A;;GA;;;BA)(A;;GA;;;SY)")
if err != nil {
return 0, errors.Wrapf(err, "failed to get security descriptor for event '%s'", event)
return 0, fmt.Errorf("failed to get security descriptor for event %q: %w", event, err)
}
var sa windows.SecurityAttributes
sa.Length = uint32(unsafe.Sizeof(sa))
@ -304,7 +304,7 @@ func createEvent(event string) (windows.Handle, error) {
sa.SecurityDescriptor = sd
h, err := windows.CreateEvent(&sa, 0, 0, ev)
if h == 0 || err != nil {
return 0, errors.Wrapf(err, "failed to create event '%s'", event)
return 0, fmt.Errorf("failed to create event %q: %w", event, err)
}
return h, nil
}

Просмотреть файл

@ -5,6 +5,7 @@ package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
@ -15,7 +16,6 @@ import (
"github.com/containerd/errdefs"
typeurl "github.com/containerd/typeurl/v2"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"google.golang.org/protobuf/types/known/emptypb"
"google.golang.org/protobuf/types/known/timestamppb"
@ -35,7 +35,7 @@ var empty = &emptypb.Empty{}
func (s *service) getPod() (shimPod, error) {
raw := s.taskOrPod.Load()
if raw == nil {
return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "task with id: '%s' must be created first", s.tid)
return nil, fmt.Errorf("task with id: %q must be created first: %w", s.tid, errdefs.ErrFailedPrecondition)
}
return raw.(shimPod), nil
}
@ -47,7 +47,7 @@ func (s *service) getPod() (shimPod, error) {
func (s *service) getTask(tid string) (shimTask, error) {
raw := s.taskOrPod.Load()
if raw == nil {
return nil, errors.Wrapf(errdefs.ErrNotFound, "task with id: '%s' not found", tid)
return nil, fmt.Errorf("task with id: %q: %w", tid, errdefs.ErrNotFound)
}
if s.isSandbox {
p := raw.(shimPod)
@ -55,7 +55,7 @@ func (s *service) getTask(tid string) (shimTask, error) {
}
// When its not a sandbox only the init task is a valid id.
if s.tid != tid {
return nil, errors.Wrapf(errdefs.ErrNotFound, "task with id: '%s' not found", tid)
return nil, fmt.Errorf("task with id: %q: %w", tid, errdefs.ErrNotFound)
}
return raw.(shimTask), nil
}
@ -96,12 +96,12 @@ func (s *service) createInternal(ctx context.Context, req *task.CreateTaskReques
f.Close()
spec = oci.UpdateSpecFromOptions(spec, shimOpts)
//expand annotations after defaults have been loaded in from options
// expand annotations after defaults have been loaded in from options
err = oci.ProcessAnnotations(ctx, &spec)
// since annotation expansion is used to toggle security features
// raise it rather than suppress and move on
if err != nil {
return nil, errors.Wrap(err, "unable to process OCI Spec annotations")
return nil, fmt.Errorf("unable to process OCI Spec annotations: %w", err)
}
// If sandbox isolation is set to hypervisor, make sure the HyperV option
@ -124,7 +124,7 @@ func (s *service) createInternal(ctx context.Context, req *task.CreateTaskReques
}
if req.Terminal && req.Stderr != "" {
return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "if using terminal, stderr must be empty")
return nil, fmt.Errorf("if using terminal, stderr must be empty: %w", errdefs.ErrFailedPrecondition)
}
resp := &task.CreateTaskResponse{}
@ -198,7 +198,7 @@ func (s *service) deleteInternal(ctx context.Context, req *task.DeleteRequest) (
if s.isSandbox && req.ExecID == "" {
p, err := s.getPod()
if err != nil {
return nil, errors.Wrapf(err, "could not get pod %q to delete task %q", s.tid, req.ID)
return nil, fmt.Errorf("could not get pod %q to delete task %q: %w", s.tid, req.ID, err)
}
err = p.DeleteTask(ctx, req.ID)
if err != nil {
@ -227,7 +227,7 @@ func (s *service) pidsInternal(ctx context.Context, req *task.PidsRequest) (*tas
for i, p := range pids {
a, err := typeurl.MarshalAny(p)
if err != nil {
return nil, errors.Wrapf(err, "failed to marshal ProcessDetails for process: %s, task: %s", p.ExecID, req.ID)
return nil, fmt.Errorf("failed to marshal ProcessDetails for process: %s, task: %s: %w", p.ExecID, req.ID, err)
}
proc := &containerd_v1_types.ProcessInfo{
Pid: p.ProcessID,
@ -272,7 +272,7 @@ func (s *service) killInternal(ctx context.Context, req *task.KillRequest) (*emp
if s.isSandbox {
pod, err := s.getPod()
if err != nil {
return nil, errors.Wrapf(errdefs.ErrNotFound, "%v: task with id: '%s' not found", err, req.ID)
return nil, fmt.Errorf("%v: task with id: %q: %w", err, req.ID, errdefs.ErrNotFound)
}
// Send it to the POD and let it cascade on its own through all tasks.
err = pod.KillTask(ctx, req.ID, req.ExecID, req.Signal, req.All)
@ -299,11 +299,11 @@ func (s *service) execInternal(ctx context.Context, req *task.ExecProcessRequest
return nil, err
}
if req.Terminal && req.Stderr != "" {
return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "if using terminal, stderr must be empty")
return nil, fmt.Errorf("if using terminal, stderr must be empty: %w", errdefs.ErrFailedPrecondition)
}
var spec specs.Process
if err := json.Unmarshal(req.Spec.Value, &spec); err != nil {
return nil, errors.Wrap(err, "request.Spec was not oci process")
return nil, fmt.Errorf("request.Spec was not oci process: %w", err)
}
err = t.CreateExec(ctx, req, &spec)
if err != nil {
@ -314,7 +314,7 @@ func (s *service) execInternal(ctx context.Context, req *task.ExecProcessRequest
func (s *service) diagExecInHostInternal(ctx context.Context, req *shimdiag.ExecProcessRequest) (*shimdiag.ExecProcessResponse, error) {
if req.Terminal && req.Stderr != "" {
return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "if using terminal, stderr must be empty")
return nil, fmt.Errorf("if using terminal, stderr must be empty: %w", errdefs.ErrFailedPrecondition)
}
t, err := s.getTask(s.tid)
if err != nil {
@ -353,7 +353,7 @@ func (s *service) diagListExecs(task shimTask) ([]*shimdiag.Exec, error) {
func (s *service) diagTasksInternal(ctx context.Context, req *shimdiag.TasksRequest) (_ *shimdiag.TasksResponse, err error) {
raw := s.taskOrPod.Load()
if raw == nil {
return nil, errors.Wrapf(errdefs.ErrNotFound, "task with id: '%s' not found", s.tid)
return nil, fmt.Errorf("task with id: %q: %w", s.tid, errdefs.ErrNotFound)
}
resp := &shimdiag.TasksResponse{}
@ -432,7 +432,7 @@ func (s *service) closeIOInternal(ctx context.Context, req *task.CloseIORequest)
func (s *service) updateInternal(ctx context.Context, req *task.UpdateTaskRequest) (*emptypb.Empty, error) {
if req.Resources == nil {
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "resources cannot be empty, updating container %s resources failed", req.ID)
return nil, fmt.Errorf("resources cannot be empty, updating container %s resources failed: %w", req.ID, errdefs.ErrInvalidArgument)
}
t, err := s.getTask(req.ID)
if err != nil {
@ -476,7 +476,7 @@ func (s *service) statsInternal(ctx context.Context, req *task.StatsRequest) (*t
}
any, err := typeurl.MarshalAny(stats)
if err != nil {
return nil, errors.Wrapf(err, "failed to marshal Statistics for task: %s", req.ID)
return nil, fmt.Errorf("failed to marshal Statistics for task: %q: %w", req.ID, err)
}
return &task.StatsResponse{Stats: protobuf.FromAny(any)}, nil
}

Просмотреть файл

@ -4,6 +4,7 @@ package main
import (
"context"
"errors"
"fmt"
"reflect"
"testing"
@ -12,12 +13,11 @@ import (
"github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats"
v1 "github.com/containerd/cgroups/v3/cgroup1/stats"
task "github.com/containerd/containerd/api/runtime/task/v2"
"github.com/pkg/errors"
)
func verifyExpectedError(t *testing.T, resp interface{}, actual, expected error) {
t.Helper()
if actual == nil || errors.Cause(actual) != expected || !errors.Is(actual, expected) { //nolint:errorlint
if actual == nil || !errors.Is(actual, expected) { //nolint:errorlint
t.Fatalf("expected error: %v, got: %v", expected, actual)
}

Просмотреть файл

@ -17,7 +17,6 @@ import (
task "github.com/containerd/containerd/api/runtime/task/v2"
"github.com/containerd/containerd/runtime/v2/shim"
"github.com/containerd/ttrpc"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@ -82,7 +81,7 @@ The start command can either start a new shim or return an address to an existin
// Connect to the hosting shim and get the pid
c, err := winio.DialPipe(address, nil)
if err != nil {
return errors.Wrap(err, "failed to connect to hosting shim")
return fmt.Errorf("failed to connect to hosting shim: %w", err)
}
cl := ttrpc.NewClient(c, ttrpc.WithOnClose(func() { c.Close() }))
t := task.NewTaskClient(cl)
@ -93,7 +92,7 @@ The start command can either start a new shim or return an address to an existin
cl.Close()
c.Close()
if err != nil {
return errors.Wrap(err, "failed to get shim pid from hosting shim")
return fmt.Errorf("failed to get shim pid from hosting shim: %w", err)
}
pid = int(cr.ShimPid)
}
@ -102,7 +101,7 @@ The start command can either start a new shim or return an address to an existin
if address == "" {
isSandbox := ct == oci.KubernetesContainerTypeSandbox
if isSandbox && idFlag != sbid {
return errors.Errorf(
return fmt.Errorf(
"'id' and '%s' must match for '%s=%s'",
annotations.KubernetesSandboxID,
annotations.KubernetesContainerType,
@ -197,7 +196,7 @@ func getSpecAnnotations(bundlePath string) (map[string]string, error) {
defer f.Close()
var spec specAnnotations
if err := json.NewDecoder(f).Decode(&spec); err != nil {
return nil, errors.Wrap(err, "failed to deserialize valid OCI spec")
return nil, fmt.Errorf("failed to deserialize valid OCI spec: %w", err)
}
return spec.Annotations, nil
}

Просмотреть файл

@ -4,6 +4,7 @@ package main
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
@ -18,7 +19,6 @@ import (
"github.com/containerd/errdefs"
"github.com/containerd/typeurl/v2"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/protobuf/types/known/timestamppb"
@ -59,11 +59,11 @@ func newHcsStandaloneTask(ctx context.Context, events publisher, req *task.Creat
return nil, err
}
if ct != oci.KubernetesContainerTypeNone {
return nil, errors.Wrapf(
errdefs.ErrFailedPrecondition,
"cannot create standalone task, expected no annotation: '%s': got '%s'",
return nil, fmt.Errorf(
"cannot create standalone task, expected no annotation: %q, got %q: %w",
annotations.KubernetesContainerType,
ct)
ct,
errdefs.ErrFailedPrecondition)
}
owner := filepath.Base(os.Args[0])
@ -102,7 +102,7 @@ func newHcsStandaloneTask(ctx context.Context, events publisher, req *task.Creat
parent.Close()
}
} else if !oci.IsWCOW(s) {
return nil, errors.Wrap(errdefs.ErrFailedPrecondition, "oci spec does not contain WCOW or LCOW spec")
return nil, fmt.Errorf("oci spec does not contain WCOW or LCOW spec: %w", errdefs.ErrFailedPrecondition)
}
shim, err := newHcsTask(ctx, events, parent, true, req, s)
@ -186,7 +186,8 @@ func newHcsTask(
parent *uvm.UtilityVM,
ownsParent bool,
req *task.CreateTaskRequest,
s *specs.Spec) (_ shimTask, err error) {
s *specs.Spec,
) (_ shimTask, err error) {
log.G(ctx).WithFields(logrus.Fields{
"tid": req.ID,
"ownsParent": ownsParent,
@ -354,11 +355,11 @@ func (ht *hcsTask) CreateExec(ctx context.Context, req *task.ExecProcessRequest,
// If the task exists or we got a request for "" which is the init task
// fail.
if _, loaded := ht.execs.Load(req.ExecID); loaded || req.ExecID == "" {
return errors.Wrapf(errdefs.ErrAlreadyExists, "exec: '%s' in task: '%s' already exists", req.ExecID, ht.id)
return fmt.Errorf("exec: %q in task: %q: %w", req.ExecID, ht.id, errdefs.ErrAlreadyExists)
}
if ht.init.State() != shimExecStateRunning {
return errors.Wrapf(errdefs.ErrFailedPrecondition, "exec: '' in task: '%s' must be running to create additional execs", ht.id)
return fmt.Errorf("exec: \"\" in task: %q must be running to create additional execs: %w", ht.id, errdefs.ErrFailedPrecondition)
}
io, err := cmd.NewUpstreamIO(ctx, req.ID, req.Stdout, req.Stderr, req.Stdin, req.Terminal, ht.ioRetryTimeout)
@ -397,7 +398,7 @@ func (ht *hcsTask) GetExec(eid string) (shimExec, error) {
}
raw, loaded := ht.execs.Load(eid)
if !loaded {
return nil, errors.Wrapf(errdefs.ErrNotFound, "exec: '%s' in task: '%s' not found", eid, ht.id)
return nil, fmt.Errorf("exec: %q in task: %q: %w", eid, ht.id, errdefs.ErrNotFound)
}
return raw.(shimExec), nil
}
@ -425,7 +426,7 @@ func (ht *hcsTask) KillExec(ctx context.Context, eid string, signal uint32, all
return err
}
if all && eid != "" {
return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot signal all for non-empty exec: '%s'", eid)
return fmt.Errorf("cannot signal all for non-empty exec: %q: %w", eid, errdefs.ErrFailedPrecondition)
}
if all {
// We are in a kill all on the init task. Signal everything.
@ -508,7 +509,7 @@ func (ht *hcsTask) DeleteExec(ctx context.Context, eid string) (int, uint32, tim
select {
case <-time.After(30 * time.Second):
log.G(ctx).Error("timed out waiting for resource cleanup")
return 0, 0, time.Time{}, errors.Wrap(hcs.ErrTimeout, "waiting for container resource cleanup")
return 0, 0, time.Time{}, fmt.Errorf("waiting for container resource cleanup: %w", hcs.ErrTimeout)
case <-ht.closed:
}
@ -573,7 +574,7 @@ func (ht *hcsTask) Pids(ctx context.Context) ([]*runhcsopts.ProcessDetails, erro
props, err := ht.c.Properties(ctx, schema1.PropertyTypeProcessList)
if err != nil {
if isStatsNotFound(err) {
return nil, errors.Wrapf(errdefs.ErrNotFound, "failed to fetch pids: %s", err)
return nil, fmt.Errorf("failed to fetch pids: %w: %w", err, errdefs.ErrNotFound)
}
return nil, err
}
@ -827,7 +828,7 @@ func (ht *hcsTask) Stats(ctx context.Context) (*stats.Statistics, error) {
props, err := ht.c.PropertiesV2(ctx, hcsschema.PTStatistics)
if err != nil {
if isStatsNotFound(err) {
return nil, errors.Wrapf(errdefs.ErrNotFound, "failed to fetch stats: %s", err)
return nil, fmt.Errorf("failed to fetch stats: %w: %w", err, errdefs.ErrNotFound)
}
return nil, err
}
@ -852,7 +853,7 @@ func (ht *hcsTask) Stats(ctx context.Context) (*stats.Statistics, error) {
func (ht *hcsTask) Update(ctx context.Context, req *task.UpdateTaskRequest) error {
resources, err := typeurl.UnmarshalAny(req.Resources)
if err != nil {
return errors.Wrapf(err, "failed to unmarshal resources for container %s update request", req.ID)
return fmt.Errorf("failed to unmarshal resources for container %q update request: %w", req.ID, err)
}
if err := verifyTaskUpdateResourcesType(resources); err != nil {
@ -1024,7 +1025,7 @@ func (ht *hcsTask) updateWCOWContainerMount(ctx context.Context, resources *ctrd
// about the isolated case.
hostPath, err := fs.ResolvePath(resources.HostPath)
if err != nil {
return errors.Wrapf(err, "failed to resolve path for hostPath %s", resources.HostPath)
return fmt.Errorf("failed to resolve path for hostPath %q: %w", resources.HostPath, err)
}
// process isolated windows container
@ -1034,7 +1035,7 @@ func (ht *hcsTask) updateWCOWContainerMount(ctx context.Context, resources *ctrd
ReadOnly: resources.ReadOnly,
}
if err := ht.requestAddContainerMount(ctx, resourcepaths.SiloMappedDirectoryResourcePath, settings); err != nil {
return errors.Wrapf(err, "failed to add mount to process isolated container")
return fmt.Errorf("failed to add mount to process isolated container: %w", err)
}
} else {
// if it is a mount request for a running hyperV WCOW container, we should first mount volume to the
@ -1052,7 +1053,7 @@ func (ht *hcsTask) updateWCOWContainerMount(ctx context.Context, resources *ctrd
ReadOnly: resources.ReadOnly,
}
if err := ht.requestAddContainerMount(ctx, resourcepaths.SiloMappedDirectoryResourcePath, settings); err != nil {
return errors.Wrapf(err, "failed to add mount to hyperV container")
return fmt.Errorf("failed to add mount to hyperV container: %w", err)
}
}
return nil

Просмотреть файл

@ -4,6 +4,8 @@ package main
import (
"context"
"errors"
"fmt"
"time"
"github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
@ -15,7 +17,6 @@ import (
"github.com/containerd/errdefs"
typeurl "github.com/containerd/typeurl/v2"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
var _ = (shimTask)(&testShimTask{})
@ -106,7 +107,7 @@ func (tst *testShimTask) DumpGuestStacks(ctx context.Context) string {
func (tst *testShimTask) Update(ctx context.Context, req *task.UpdateTaskRequest) error {
data, err := typeurl.UnmarshalAny(req.Resources)
if err != nil {
return errors.Wrapf(err, "failed to unmarshal resources for container %s update request", req.ID)
return fmt.Errorf("failed to unmarshal resources for container %q update request: %w", req.ID, err)
}
if err := verifyTaskUpdateResourcesType(data); err != nil {
return err

Просмотреть файл

@ -4,6 +4,7 @@ package main
import (
"context"
"fmt"
"sync"
"time"
@ -20,7 +21,6 @@ import (
"github.com/containerd/errdefs"
typeurl "github.com/containerd/typeurl/v2"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"go.opencensus.io/trace"
)
@ -99,7 +99,7 @@ func (wpst *wcowPodSandboxTask) ID() string {
}
func (wpst *wcowPodSandboxTask) CreateExec(ctx context.Context, req *task.ExecProcessRequest, s *specs.Process) error {
return errors.Wrap(errdefs.ErrNotImplemented, "WCOW Pod task should never issue exec")
return fmt.Errorf("WCOW Pod task should never issue exec: %w", errdefs.ErrNotImplemented)
}
func (wpst *wcowPodSandboxTask) GetExec(eid string) (shimExec, error) {
@ -107,7 +107,7 @@ func (wpst *wcowPodSandboxTask) GetExec(eid string) (shimExec, error) {
return wpst.init, nil
}
// Cannot exec in an a WCOW sandbox container so all non-init calls fail here.
return nil, errors.Wrapf(errdefs.ErrNotFound, "exec: '%s' in task: '%s' not found", eid, wpst.id)
return nil, fmt.Errorf("exec: %q in task: %q: %w", eid, wpst.id, errdefs.ErrNotFound)
}
func (wpst *wcowPodSandboxTask) ListExecs() ([]shimExec, error) {
@ -120,7 +120,7 @@ func (wpst *wcowPodSandboxTask) KillExec(ctx context.Context, eid string, signal
return err
}
if all && eid != "" {
return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot signal all for non-empty exec: '%s'", eid)
return fmt.Errorf("cannot signal all for non-empty exec: %q: %w", eid, errdefs.ErrFailedPrecondition)
}
err = e.Kill(ctx, signal)
if err != nil {
@ -275,7 +275,7 @@ func (wpst *wcowPodSandboxTask) Update(ctx context.Context, req *task.UpdateTask
resources, err := typeurl.UnmarshalAny(req.Resources)
if err != nil {
return errors.Wrapf(err, "failed to unmarshal resources for container %s update request", req.ID)
return fmt.Errorf("failed to unmarshal resources for container %q update request: %w", req.ID, err)
}
if err := verifyTaskUpdateResourcesType(resources); err != nil {

Просмотреть файл

@ -17,7 +17,6 @@ import (
cgroups "github.com/containerd/cgroups/v3/cgroup1"
cgroupstats "github.com/containerd/cgroups/v3/cgroup1/stats"
oci "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@ -121,12 +120,12 @@ func runWithRestartMonitor(arg0 string, args ...string) {
func startTimeSyncService() error {
ptpClassDir, err := os.Open("/sys/class/ptp")
if err != nil {
return errors.Wrap(err, "failed to open PTP class directory")
return fmt.Errorf("failed to open PTP class directory: %w", err)
}
ptpDirList, err := ptpClassDir.Readdirnames(-1)
if err != nil {
return errors.Wrap(err, "failed to list PTP class directory")
return fmt.Errorf("failed to list PTP class directory: %w", err)
}
var ptpDirPath string
@ -137,7 +136,7 @@ func startTimeSyncService() error {
clockNameFilePath := filepath.Join(ptpClassDir.Name(), ptpDirPath, "clock_name")
buf, err := os.ReadFile(clockNameFilePath)
if err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "failed to read clock name file at %s", clockNameFilePath)
return fmt.Errorf("failed to read clock name: %w", err)
}
if string(buf) == expectedClockName {
@ -147,7 +146,7 @@ func startTimeSyncService() error {
}
if !found {
return errors.Errorf("no PTP device found with name \"%s\"", expectedClockName)
return fmt.Errorf("no PTP device found with name %q", expectedClockName)
}
// create chronyd config file
@ -155,9 +154,9 @@ func startTimeSyncService() error {
// chronyd config file take from: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/time-sync
chronydConfigString := fmt.Sprintf("refclock PHC %s poll 3 dpoll -2 offset 0 stratum 2\nmakestep 0.1 -1\n", ptpDevPath)
chronydConfPath := "/tmp/chronyd.conf"
err = os.WriteFile(chronydConfPath, []byte(chronydConfigString), 0644)
err = os.WriteFile(chronydConfPath, []byte(chronydConfigString), 0o644)
if err != nil {
return errors.Wrapf(err, "failed to create chronyd conf file %s", chronydConfPath)
return fmt.Errorf("failed to create chronyd conf file: %w", err)
}
// start chronyd. Do NOT start chronyd as daemon because creating a daemon
@ -220,7 +219,7 @@ func main() {
var logWriter *os.File
if *logFile != "" {
logFileHandle, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
logFileHandle, err := os.OpenFile(*logFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
if err != nil {
logrus.WithFields(logrus.Fields{
"path": *logFile,
@ -285,7 +284,7 @@ func main() {
if err := os.WriteFile(
"/proc/sys/kernel/core_pattern",
[]byte(*coreDumpLoc),
0644,
0o644,
); err != nil {
logrus.WithError(err).Fatal("failed to set core dump location")
}
@ -333,7 +332,7 @@ func main() {
// Write 1 to memory.use_hierarchy on the root cgroup to enable hierarchy
// support. This needs to be set before we create any cgroups as the write
// will fail otherwise.
if err := os.WriteFile("/sys/fs/cgroup/memory/memory.use_hierarchy", []byte("1"), 0644); err != nil {
if err := os.WriteFile("/sys/fs/cgroup/memory/memory.use_hierarchy", []byte("1"), 0o644); err != nil {
logrus.WithError(err).Fatal("failed to enable hierarchy support for root cgroup")
}

Просмотреть файл

@ -5,6 +5,7 @@ package main
import (
"context"
"errors"
"fmt"
"io/fs"
"os"
@ -14,7 +15,6 @@ import (
"github.com/Microsoft/hcsshim/internal/guest/storage/overlay"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/pkg/errors"
)
const moduleExtension = ".ko"
@ -51,7 +51,7 @@ func install(ctx context.Context) error {
modules := []string{}
if walkErr := filepath.Walk(rootPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return errors.Wrap(err, "failed to read directory while walking dir")
return fmt.Errorf("failed to read directory while walking dir: %w", err)
}
if !info.IsDir() && filepath.Ext(info.Name()) == moduleExtension {
moduleName := strings.TrimSuffix(info.Name(), moduleExtension)
@ -67,7 +67,7 @@ func install(ctx context.Context) error {
cmd := exec.Command("depmod", depmodArgs...)
out, err := cmd.CombinedOutput()
if err != nil {
return errors.Wrapf(err, "failed to run depmod with args %v: %s", depmodArgs, out)
return fmt.Errorf("failed to run depmod with args %v: %w (output: %s)", depmodArgs, err, out)
}
// run modprobe for every module name found
@ -79,7 +79,7 @@ func install(ctx context.Context) error {
out, err = cmd.CombinedOutput()
if err != nil {
return errors.Wrapf(err, "failed to run modprobe with args %v: %s", modprobeArgs, out)
return fmt.Errorf("failed to run modprobe with args %v: %w (output: %s)", modprobeArgs, err, out)
}
return nil

Просмотреть файл

@ -3,9 +3,8 @@
package main
import (
"errors"
"sync"
"github.com/pkg/errors"
)
var errNilCache = errors.New("cannot access a nil cache")

Просмотреть файл

@ -4,11 +4,11 @@ package main
import (
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"github.com/pkg/errors"
"github.com/urfave/cli"
)
@ -30,20 +30,20 @@ var configCommand = cli.Command{
configData, err := json.MarshalIndent(defaultConfig(), "", " ")
if err != nil {
return errors.Wrap(err, "failed to marshal ncproxy config to json")
return fmt.Errorf("failed to marshal ncproxy config to json: %w", err)
}
if file != "" {
// Make the directory if it doesn't exist.
if _, err := os.Stat(filepath.Dir(file)); err != nil {
if err := os.MkdirAll(filepath.Dir(file), 0700); err != nil {
return errors.Wrap(err, "failed to make path to config file")
if err := os.MkdirAll(filepath.Dir(file), 0o700); err != nil {
return fmt.Errorf("failed to make path to config file: %w", err)
}
}
if err := os.WriteFile(
file,
[]byte(configData),
0700,
0o700,
); err != nil {
return err
}
@ -96,7 +96,7 @@ func loadConfig(path string) (*config, error) {
func readConfig(path string) (*config, error) {
data, err := os.ReadFile(path)
if err != nil {
return nil, errors.Wrap(err, "failed to read config file")
return nil, fmt.Errorf("failed to read config file: %w", err)
}
conf := &config{}
if err := json.Unmarshal(data, conf); err != nil {

Просмотреть файл

@ -5,6 +5,7 @@ package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"strings"
@ -12,7 +13,6 @@ import (
"github.com/Microsoft/hcsshim/hcn"
"github.com/Microsoft/hcsshim/internal/log"
ncproxygrpc "github.com/Microsoft/hcsshim/pkg/ncproxy/ncproxygrpc/v1"
"github.com/pkg/errors"
)
func hcnEndpointToEndpointResponse(ep *hcn.HostComputeEndpoint) (_ *ncproxygrpc.GetEndpointResponse, err error) {
@ -36,12 +36,12 @@ func hcnEndpointToEndpointResponse(ep *hcn.HostComputeEndpoint) (_ *ncproxygrpc.
ipConfigInfos := ep.IpConfigurations
// there may be one ipv4 and/or one ipv6 configuration for an endpoint
if len(ipConfigInfos) == 0 || len(ipConfigInfos) > 2 {
return nil, errors.Errorf("invalid number (%v) of ip configuration information for endpoint %v", len(ipConfigInfos), ep.Name)
return nil, fmt.Errorf("invalid number (%v) of ip configuration information for endpoint %v", len(ipConfigInfos), ep.Name)
}
for _, ipConfig := range ipConfigInfos {
ip := net.ParseIP(ipConfig.IpAddress)
if ip == nil {
return nil, errors.Errorf("failed to parse IP address %v", ipConfig.IpAddress)
return nil, fmt.Errorf("failed to parse IP address %v", ipConfig.IpAddress)
}
if ip.To4() != nil {
// this is an IPv4 address
@ -121,7 +121,7 @@ func constructEndpointPolicies(req *ncproxygrpc.HcnEndpointPolicies) ([]hcn.Endp
}
iovJSON, err := json.Marshal(iovSettings)
if err != nil {
return []hcn.EndpointPolicy{}, errors.Wrap(err, "failed to marshal IovPolicySettings")
return []hcn.EndpointPolicy{}, fmt.Errorf("failed to marshal IovPolicySettings: %w", err)
}
policy := hcn.EndpointPolicy{
Type: hcn.IOV,
@ -136,7 +136,7 @@ func constructEndpointPolicies(req *ncproxygrpc.HcnEndpointPolicies) ([]hcn.Endp
}
portPolicyJSON, err := json.Marshal(portPolicy)
if err != nil {
return []hcn.EndpointPolicy{}, errors.Wrap(err, "failed to marshal portname")
return []hcn.EndpointPolicy{}, fmt.Errorf("failed to marshal portname: %w", err)
}
policy := hcn.EndpointPolicy{
Type: hcn.PortName,
@ -152,7 +152,7 @@ func createHCNNetwork(ctx context.Context, req *ncproxygrpc.HostComputeNetworkSe
// Check if the network already exists, and if so return error.
_, err := hcn.GetNetworkByName(req.Name)
if err == nil {
return nil, errors.Errorf("network with name %q already exists", req.Name)
return nil, fmt.Errorf("network with name %q already exists", req.Name)
}
policies := []hcn.NetworkPolicy{}
@ -163,20 +163,20 @@ func createHCNNetwork(ctx context.Context, req *ncproxygrpc.HostComputeNetworkSe
extSwitch, err := hcn.GetNetworkByName(req.SwitchName)
if err != nil {
if _, ok := err.(hcn.NetworkNotFoundError); ok { //nolint:errorlint
return nil, errors.Errorf("no network/switch with name `%s` found", req.SwitchName)
return nil, fmt.Errorf("no network/switch with name %q found", req.SwitchName)
}
return nil, errors.Wrapf(err, "failed to get network/switch with name %q", req.SwitchName)
return nil, fmt.Errorf("failed to get network/switch with name %q: %w", req.SwitchName, err)
}
// Get layer ID and use this as the basis for what to layer the new network over.
if extSwitch.Health.Extra.LayeredOn == "" {
return nil, errors.Errorf("no layer ID found for network %q found", extSwitch.Id)
return nil, fmt.Errorf("no layer ID found for network %q", extSwitch.Id)
}
layerPolicy := hcn.LayerConstraintNetworkPolicySetting{LayerId: extSwitch.Health.Extra.LayeredOn}
data, err := json.Marshal(layerPolicy)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal layer policy")
return nil, fmt.Errorf("failed to marshal layer policy: %w", err)
}
netPolicy := hcn.NetworkPolicy{
@ -238,7 +238,7 @@ func createHCNNetwork(ctx context.Context, req *ncproxygrpc.HostComputeNetworkSe
network, err = network.Create()
if err != nil {
return nil, errors.Wrapf(err, "failed to create HNS network %q", req.Name)
return nil, fmt.Errorf("failed to create HNS network %q: %w", req.Name, err)
}
return network, nil
@ -355,7 +355,7 @@ func createHCNEndpoint(ctx context.Context, network *hcn.HostComputeNetwork, req
if req.Policies != nil {
policies, err = constructEndpointPolicies(req.Policies)
if err != nil {
return nil, errors.Wrap(err, "failed to construct endpoint policies")
return nil, fmt.Errorf("failed to construct endpoint policies: %w", err)
}
}
@ -380,7 +380,7 @@ func createHCNEndpoint(ctx context.Context, network *hcn.HostComputeNetwork, req
}
endpoint, err = endpoint.Create()
if err != nil {
return nil, errors.Wrap(err, "failed to create HNS endpoint")
return nil, fmt.Errorf("failed to create HNS endpoint: %w", err)
}
return endpoint, nil
@ -391,7 +391,7 @@ func createHCNEndpoint(ctx context.Context, network *hcn.HostComputeNetwork, req
func getHostDefaultNamespace() (string, error) {
namespaces, err := hcn.ListNamespaces()
if err != nil {
return "", errors.Wrapf(err, "failed list namespaces")
return "", fmt.Errorf("failed to list namespaces: %w", err)
}
for _, ns := range namespaces {

Просмотреть файл

@ -5,13 +5,15 @@ package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/Microsoft/go-winio"
"github.com/containerd/containerd/protobuf"
"github.com/containerd/ttrpc"
typeurl "github.com/containerd/typeurl/v2"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -113,7 +115,7 @@ func (s *grpcService) AddNIC(ctx context.Context, req *ncproxygrpc.AddNICRequest
if _, ok := err.(hcn.EndpointNotFoundError); ok { //nolint:errorlint
return nil, status.Errorf(codes.NotFound, "no endpoint with name `%s` found", req.EndpointName)
}
return nil, errors.Wrapf(err, "failed to get endpoint with name `%s`", req.EndpointName)
return nil, fmt.Errorf("failed to get endpoint with name %q: %w", req.EndpointName, err)
}
anyEndpoint, err = typeurl.MarshalAny(ep)
@ -143,7 +145,7 @@ func (s *grpcService) AddNIC(ctx context.Context, req *ncproxygrpc.AddNICRequest
}
policies := []hcn.EndpointPolicy{iovPolicy}
if err := modifyEndpoint(ctx, ep.Id, policies, hcn.RequestTypeUpdate); err != nil {
return nil, errors.Wrap(err, "failed to add policy to endpoint")
return nil, fmt.Errorf("failed to add policy to endpoint: %w", err)
}
}
}
@ -183,7 +185,7 @@ func (s *grpcService) ModifyNIC(ctx context.Context, req *ncproxygrpc.ModifyNICR
if _, ok := err.(hcn.EndpointNotFoundError); ok { //nolint:errorlint
return nil, status.Errorf(codes.NotFound, "no endpoint with name `%s` found", req.EndpointName)
}
return nil, errors.Wrapf(err, "failed to get endpoint with name `%s`", req.EndpointName)
return nil, fmt.Errorf("failed to get endpoint with name %q: %w", req.EndpointName, err)
}
anyEndpoint, err := typeurl.MarshalAny(ep)
@ -237,14 +239,14 @@ func (s *grpcService) ModifyNIC(ctx context.Context, req *ncproxygrpc.ModifyNICR
return nil, err
}
if err := modifyEndpoint(ctx, ep.Id, policies, hcn.RequestTypeUpdate); err != nil {
return nil, errors.Wrap(err, "failed to modify network adapter")
return nil, fmt.Errorf("failed to modify network adapter: %w", err)
}
if err := modifyEndpoint(ctx, ep.Id, policies, hcn.RequestTypeRemove); err != nil {
return nil, errors.Wrap(err, "failed to modify network adapter")
return nil, fmt.Errorf("failed to modify network adapter: %w", err)
}
} else {
if err := modifyEndpoint(ctx, ep.Id, policies, hcn.RequestTypeUpdate); err != nil {
return nil, errors.Wrap(err, "failed to modify network adapter")
return nil, fmt.Errorf("failed to modify network adapter: %w", err)
}
if _, err := agent.ModifyNIC(ctx, caReq); err != nil {
return nil, err
@ -284,7 +286,7 @@ func (s *grpcService) DeleteNIC(ctx context.Context, req *ncproxygrpc.DeleteNICR
if _, ok := err.(hcn.EndpointNotFoundError); ok { //nolint:errorlint
return nil, status.Errorf(codes.NotFound, "no endpoint with name `%s` found", req.EndpointName)
}
return nil, errors.Wrapf(err, "failed to get endpoint with name `%s`", req.EndpointName)
return nil, fmt.Errorf("failed to get endpoint with name %q: %w", req.EndpointName, err)
}
anyEndpoint, err = typeurl.MarshalAny(ep)
if err != nil {
@ -387,7 +389,7 @@ func (s *grpcService) CreateEndpoint(ctx context.Context, req *ncproxygrpc.Creat
if _, ok := err.(hcn.NetworkNotFoundError); ok { //nolint:errorlint
return nil, status.Errorf(codes.NotFound, "no network with name `%s` found", reqEndpoint.NetworkName)
}
return nil, errors.Wrapf(err, "failed to get network with name %q", reqEndpoint.NetworkName)
return nil, fmt.Errorf("failed to get network with name %q: %w", reqEndpoint.NetworkName, err)
}
ep, err := createHCNEndpoint(ctx, network, reqEndpoint)
if err != nil {
@ -403,9 +405,9 @@ func (s *grpcService) CreateEndpoint(ctx context.Context, req *ncproxygrpc.Creat
return nil, status.Errorf(codes.InvalidArgument, "received empty field in request: %+v", req)
}
network, err := s.ncpNetworkingStore.GetNetworkByName(ctx, reqEndpoint.NetworkName)
if err != nil || network == nil {
return nil, errors.Wrapf(err, "network %v does not exist", reqEndpoint.NetworkName)
_, err := s.ncpNetworkingStore.GetNetworkByName(ctx, reqEndpoint.NetworkName)
if err != nil {
return nil, fmt.Errorf("network %v does not exist: %w", reqEndpoint.NetworkName, err)
}
epSettings := &ncproxynetworking.EndpointSettings{
Name: reqEndpoint.Name,
@ -452,7 +454,7 @@ func (s *grpcService) AddEndpoint(ctx context.Context, req *ncproxygrpc.AddEndpo
if endpt, err := s.ncpNetworkingStore.GetEndpointByName(ctx, req.Name); err == nil {
endpt.NamespaceID = req.NamespaceID
if err := s.ncpNetworkingStore.UpdateEndpoint(ctx, endpt); err != nil {
return nil, errors.Wrapf(err, "failed to update endpoint with name `%s`", req.Name)
return nil, fmt.Errorf("failed to update endpoint with name %q: %w", req.Name, err)
}
} else {
if !errors.Is(err, ncproxystore.ErrBucketNotFound) && !errors.Is(err, ncproxystore.ErrKeyNotFound) {
@ -464,7 +466,7 @@ func (s *grpcService) AddEndpoint(ctx context.Context, req *ncproxygrpc.AddEndpo
if _, ok := err.(hcn.EndpointNotFoundError); ok { //nolint:errorlint
return nil, status.Errorf(codes.NotFound, "no endpoint with name `%s` found", req.Name)
}
return nil, errors.Wrapf(err, "failed to get endpoint with name `%s`", req.Name)
return nil, fmt.Errorf("failed to get endpoint with name %q: %w", req.Name, err)
}
if req.AttachToHost {
if req.NamespaceID != "" {
@ -483,7 +485,7 @@ func (s *grpcService) AddEndpoint(ctx context.Context, req *ncproxygrpc.AddEndpo
span.AddAttributes(trace.StringAttribute("namespaceID", req.NamespaceID))
}
if err := hcn.AddNamespaceEndpoint(req.NamespaceID, ep.Id); err != nil {
return nil, errors.Wrapf(err, "failed to add endpoint with name %q to namespace", req.Name)
return nil, fmt.Errorf("failed to add endpoint with name %q to namespace: %w", req.Name, err)
}
}
@ -504,7 +506,7 @@ func (s *grpcService) DeleteEndpoint(ctx context.Context, req *ncproxygrpc.Delet
if _, err := s.ncpNetworkingStore.GetEndpointByName(ctx, req.Name); err == nil {
if err := s.ncpNetworkingStore.DeleteEndpoint(ctx, req.Name); err != nil {
return nil, errors.Wrapf(err, "failed to delete endpoint with name %q", req.Name)
return nil, fmt.Errorf("failed to delete endpoint with name %q: %w", req.Name, err)
}
} else {
if !errors.Is(err, ncproxystore.ErrBucketNotFound) && !errors.Is(err, ncproxystore.ErrKeyNotFound) {
@ -516,11 +518,11 @@ func (s *grpcService) DeleteEndpoint(ctx context.Context, req *ncproxygrpc.Delet
if _, ok := err.(hcn.EndpointNotFoundError); ok { //nolint:errorlint
return nil, status.Errorf(codes.NotFound, "no endpoint with name `%s` found", req.Name)
}
return nil, errors.Wrapf(err, "failed to get endpoint with name %q", req.Name)
return nil, fmt.Errorf("failed to get endpoint with name %q: %w", req.Name, err)
}
if err = ep.Delete(); err != nil {
return nil, errors.Wrapf(err, "failed to delete endpoint with name %q", req.Name)
return nil, fmt.Errorf("failed to delete endpoint with name %q: %w", req.Name, err)
}
}
return &ncproxygrpc.DeleteEndpointResponse{}, nil
@ -540,7 +542,7 @@ func (s *grpcService) DeleteNetwork(ctx context.Context, req *ncproxygrpc.Delete
if _, err := s.ncpNetworkingStore.GetNetworkByName(ctx, req.Name); err == nil {
if err := s.ncpNetworkingStore.DeleteNetwork(ctx, req.Name); err != nil {
return nil, errors.Wrapf(err, "failed to delete network with name %q", req.Name)
return nil, fmt.Errorf("failed to delete network with name %q: %w", req.Name, err)
}
} else {
if !errors.Is(err, ncproxystore.ErrBucketNotFound) && !errors.Is(err, ncproxystore.ErrKeyNotFound) {
@ -551,11 +553,11 @@ func (s *grpcService) DeleteNetwork(ctx context.Context, req *ncproxygrpc.Delete
if _, ok := err.(hcn.NetworkNotFoundError); ok { //nolint:errorlint
return nil, status.Errorf(codes.NotFound, "no network with name `%s` found", req.Name)
}
return nil, errors.Wrapf(err, "failed to get network with name %q", req.Name)
return nil, fmt.Errorf("failed to get network with name %q: %w", req.Name, err)
}
if err = network.Delete(); err != nil {
return nil, errors.Wrapf(err, "failed to delete network with name %q", req.Name)
return nil, fmt.Errorf("failed to delete network with name %q: %w", req.Name, err)
}
}
@ -618,7 +620,7 @@ func (s *grpcService) GetEndpoint(ctx context.Context, req *ncproxygrpc.GetEndpo
if _, ok := err.(hcn.EndpointNotFoundError); ok { //nolint:errorlint
return nil, status.Errorf(codes.NotFound, "no endpoint with name `%s` found", req.Name)
}
return nil, errors.Wrapf(err, "failed to get endpoint with name %q", req.Name)
return nil, fmt.Errorf("failed to get endpoint with name %q: %w", req.Name, err)
}
return hcnEndpointToEndpointResponse(ep)
}
@ -632,12 +634,12 @@ func (s *grpcService) GetEndpoints(ctx context.Context, req *ncproxygrpc.GetEndp
rawHCNEndpoints, err := hcn.ListEndpoints()
if err != nil {
return nil, errors.Wrap(err, "failed to get HNS endpoints")
return nil, fmt.Errorf("failed to get HNS endpoints: %w", err)
}
rawNCProxyEndpoints, err := s.ncpNetworkingStore.ListEndpoints(ctx)
if err != nil && !errors.Is(err, ncproxystore.ErrBucketNotFound) {
return nil, errors.Wrap(err, "failed to get ncproxy networking endpoints")
return nil, fmt.Errorf("failed to get ncproxy networking endpoints: %w", err)
}
for _, endpoint := range rawHCNEndpoints {
@ -697,7 +699,7 @@ func (s *grpcService) GetNetwork(ctx context.Context, req *ncproxygrpc.GetNetwor
if _, ok := err.(hcn.NetworkNotFoundError); ok { //nolint:errorlint
return nil, status.Errorf(codes.NotFound, "no network with name `%s` found", req.Name)
}
return nil, errors.Wrapf(err, "failed to get network with name %q", req.Name)
return nil, fmt.Errorf("failed to get network with name %q: %w", req.Name, err)
}
return hcnNetworkToNetworkResponse(ctx, network)
@ -712,12 +714,12 @@ func (s *grpcService) GetNetworks(ctx context.Context, req *ncproxygrpc.GetNetwo
rawHCNNetworks, err := hcn.ListNetworks()
if err != nil {
return nil, errors.Wrap(err, "failed to get HNS networks")
return nil, fmt.Errorf("failed to get HNS networks: %w", err)
}
rawNCProxyNetworks, err := s.ncpNetworkingStore.ListNetworks(ctx)
if err != nil && !errors.Is(err, ncproxystore.ErrBucketNotFound) {
return nil, errors.Wrap(err, "failed to get ncproxy networking networks")
return nil, fmt.Errorf("failed to get ncproxy networking networks: %w", err)
}
for _, network := range rawHCNNetworks {
@ -763,7 +765,7 @@ func newTTRPCService(ctx context.Context, agent *computeAgentCache, agentStore *
func getComputeAgentClient(agentAddr string) (*computeAgentClient, error) {
conn, err := winioDialPipe(agentAddr, nil)
if err != nil {
return nil, errors.Wrap(err, "failed to connect to compute agent service")
return nil, fmt.Errorf("failed to connect to compute agent service: %w", err)
}
raw := ttrpcNewClient(
conn,

Просмотреть файл

@ -4,6 +4,7 @@ package main
import (
"context"
"errors"
"fmt"
"io"
"os"
@ -16,7 +17,7 @@ import (
"github.com/Microsoft/go-winio/pkg/etwlogrus"
"github.com/Microsoft/go-winio/pkg/guid"
"github.com/containerd/ttrpc"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"go.opencensus.io/plugin/ocgrpc"
@ -173,7 +174,7 @@ func run(clicontext *cli.Context) error {
// If a log dir was provided, make sure it exists.
if _, err := os.Stat(logDir); err != nil {
if err := os.MkdirAll(logDir, 0); err != nil {
return errors.Wrap(err, "failed to make log directory")
return fmt.Errorf("failed to make log directory: %w", err)
}
}
}
@ -208,7 +209,7 @@ func run(clicontext *cli.Context) error {
ctx := context.Background()
conf, err := loadConfig(configPath)
if err != nil {
return errors.Wrap(err, "failed getting configuration file")
return fmt.Errorf("failed getting configuration file: %w", err)
}
if conf.GRPCAddr == "" {
@ -269,7 +270,7 @@ func run(clicontext *cli.Context) error {
dir := filepath.Dir(dbPath)
if _, err := os.Stat(dir); err != nil {
if err := os.MkdirAll(dir, 0); err != nil {
return errors.Wrap(err, "failed to make database directory")
return fmt.Errorf("failed to make database directory: %w", err)
}
}
}
@ -306,7 +307,7 @@ func run(clicontext *cli.Context) error {
log.G(ctx).Info("Received interrupt. Closing")
case err := <-serveErr:
if err != nil {
return errors.Wrap(err, "server failure")
return fmt.Errorf("server failure: %w", err)
}
case <-serviceDone:
log.G(ctx).Info("Windows service stopped or shutdown")

Просмотреть файл

@ -4,6 +4,8 @@ package main
import (
"context"
"errors"
"fmt"
"net"
"strings"
"sync"
@ -16,7 +18,7 @@ import (
ncproxygrpc "github.com/Microsoft/hcsshim/pkg/ncproxy/ncproxygrpc/v1"
"github.com/Microsoft/hcsshim/pkg/octtrpc"
"github.com/containerd/ttrpc"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
"go.opencensus.io/plugin/ocgrpc"
@ -205,7 +207,7 @@ func reconnectComputeAgents(ctx context.Context, agentStore *ncproxystore.Comput
func disconnectComputeAgents(ctx context.Context, containerIDToComputeAgent *computeAgentCache) error {
agents, err := containerIDToComputeAgent.getAllAndClear()
if err != nil {
return errors.Wrapf(err, "failed to get all cached compute agent clients")
return fmt.Errorf("failed to get all cached compute agent clients: %w", err)
}
for _, agent := range agents {
if err := agent.Close(); err != nil {

Просмотреть файл

@ -4,6 +4,7 @@ package main
import (
"context"
"errors"
"net"
"path/filepath"
"testing"
@ -17,7 +18,7 @@ import (
nodenetsvc "github.com/Microsoft/hcsshim/pkg/ncproxy/nodenetsvc/v1"
nodenetsvcMock "github.com/Microsoft/hcsshim/pkg/ncproxy/nodenetsvc/v1/mock"
"github.com/containerd/ttrpc"
"github.com/pkg/errors"
bolt "go.etcd.io/bbolt"
"go.uber.org/mock/gomock"
"google.golang.org/grpc/codes"

Просмотреть файл

@ -4,13 +4,14 @@ package main
import (
gcontext "context"
"errors"
"fmt"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/lcow"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/osversion"
"github.com/pkg/errors"
"github.com/urfave/cli"
)
@ -74,14 +75,14 @@ var createScratchCommand = cli.Command{
convertUVM, err := uvm.CreateLCOW(ctx, opts)
if err != nil {
return errors.Wrapf(err, "failed to create '%s'", opts.ID)
return fmt.Errorf("failed to create %q: %w", opts.ID, err)
}
defer convertUVM.Close()
if err := convertUVM.Start(ctx); err != nil {
return errors.Wrapf(err, "failed to start '%s'", opts.ID)
return fmt.Errorf("failed to start %q: %w", opts.ID, err)
}
if err := lcow.CreateScratch(ctx, convertUVM, dest, sizeGB, context.String("cache-path")); err != nil {
return errors.Wrapf(err, "failed to create ext4vhdx for '%s'", opts.ID)
return fmt.Errorf("failed to create ext4vhdx for %q: %w", opts.ID, err)
}
return nil

Просмотреть файл

@ -4,13 +4,14 @@ package main
import (
gcontext "context"
"errors"
"fmt"
"github.com/Microsoft/hcsshim/internal/appargs"
"github.com/Microsoft/hcsshim/internal/lcow"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/osversion"
"github.com/pkg/errors"
"github.com/urfave/cli"
)
@ -51,14 +52,14 @@ var prepareDiskCommand = cli.Command{
preparediskUVM, err := uvm.CreateLCOW(ctx, opts)
if err != nil {
return errors.Wrapf(err, "failed to create '%s'", opts.ID)
return fmt.Errorf("failed to create %q: %w", opts.ID, err)
}
defer preparediskUVM.Close()
if err := preparediskUVM.Start(ctx); err != nil {
return errors.Wrapf(err, "failed to start '%s'", opts.ID)
return fmt.Errorf("failed to start %q: %w", opts.ID, err)
}
if err := lcow.FormatDisk(ctx, preparediskUVM, dest); err != nil {
return errors.Wrapf(err, "failed to format disk '%s' with ext4", opts.ID)
return fmt.Errorf("failed to format disk %q with ext4: %w", opts.ID, err)
}
return nil

Просмотреть файл

@ -5,6 +5,7 @@ package main
import (
gcontext "context"
"encoding/json"
"errors"
"fmt"
"io"
"net"
@ -16,7 +17,7 @@ import (
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/runhcs"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)

Просмотреть файл

@ -5,10 +5,10 @@ package main
// Simple wrappers around SetVolumeMountPoint and DeleteVolumeMountPoint
import (
"fmt"
"path/filepath"
"strings"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
)
@ -16,7 +16,7 @@ import (
// https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setvolumemountpointw
func setVolumeMountPoint(targetPath string, volumePath string) error {
if !strings.HasPrefix(volumePath, "\\\\?\\Volume{") {
return errors.Errorf("unable to mount non-volume path %s", volumePath)
return fmt.Errorf("unable to mount non-volume path %s", volumePath)
}
// Both must end in a backslash
@ -25,16 +25,16 @@ func setVolumeMountPoint(targetPath string, volumePath string) error {
targetP, err := windows.UTF16PtrFromString(slashedTarget)
if err != nil {
return errors.Wrapf(err, "unable to utf16-ise %s", slashedTarget)
return fmt.Errorf("unable to utf16-ise %s: %w", slashedTarget, err)
}
volumeP, err := windows.UTF16PtrFromString(slashedVolume)
if err != nil {
return errors.Wrapf(err, "unable to utf16-ise %s", slashedVolume)
return fmt.Errorf("unable to utf16-ise %s: %w", slashedVolume, err)
}
if err := windows.SetVolumeMountPoint(targetP, volumeP); err != nil {
return errors.Wrapf(err, "failed calling SetVolumeMount('%s', '%s')", slashedTarget, slashedVolume)
return fmt.Errorf("failed calling SetVolumeMount(%q, %q): %w", slashedTarget, slashedVolume, err)
}
return nil
@ -48,11 +48,11 @@ func deleteVolumeMountPoint(targetPath string) error {
targetP, err := windows.UTF16PtrFromString(slashedTarget)
if err != nil {
return errors.Wrapf(err, "unable to utf16-ise %s", slashedTarget)
return fmt.Errorf("unable to utf16-ise %s: %w", slashedTarget, err)
}
if err := windows.DeleteVolumeMountPoint(targetP); err != nil {
return errors.Wrapf(err, "failed calling DeleteVolumeMountPoint('%s')", slashedTarget)
return fmt.Errorf("failed calling DeleteVolumeMountPoint(%q): %w", slashedTarget, err)
}
return nil

Просмотреть файл

@ -5,9 +5,10 @@ package computestorage
import (
"context"
"encoding/json"
"fmt"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
)
@ -34,7 +35,7 @@ func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData L
err = hcsAttachLayerStorageFilter(layerPath, string(bytes))
if err != nil {
return errors.Wrap(err, "failed to attach layer storage filter")
return fmt.Errorf("failed to attach layer storage filter: %w", err)
}
return nil
}
@ -62,7 +63,7 @@ func AttachOverlayFilter(ctx context.Context, volumePath string, layerData Layer
err = hcsAttachOverlayFilter(volumePath, string(bytes))
if err != nil {
return errors.Wrap(err, "failed to attach overlay filter")
return fmt.Errorf("failed to attach overlay filter: %w", err)
}
return nil
}

Просмотреть файл

@ -4,9 +4,10 @@ package computestorage
import (
"context"
"fmt"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
)
@ -22,7 +23,7 @@ func DestroyLayer(ctx context.Context, layerPath string) (err error) {
err = hcsDestroyLayer(layerPath)
if err != nil {
return errors.Wrap(err, "failed to destroy layer")
return fmt.Errorf("failed to destroy layer: %w", err)
}
return nil
}

Просмотреть файл

@ -5,10 +5,11 @@ package computestorage
import (
"context"
"encoding/json"
"fmt"
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
)
@ -24,7 +25,7 @@ func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error)
err = hcsDetachLayerStorageFilter(layerPath)
if err != nil {
return errors.Wrap(err, "failed to detach layer storage filter")
return fmt.Errorf("failed to detach layer storage filter: %w", err)
}
return nil
}
@ -48,7 +49,7 @@ func DetachOverlayFilter(ctx context.Context, volumePath string, filterType hcss
err = hcsDetachOverlayFilter(volumePath, string(bytes))
if err != nil {
return errors.Wrap(err, "failed to detach overlay filter")
return fmt.Errorf("failed to detach overlay filter: %w", err)
}
return nil
}

Просмотреть файл

@ -5,9 +5,10 @@ package computestorage
import (
"context"
"encoding/json"
"fmt"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
)
@ -42,7 +43,7 @@ func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerD
err = hcsExportLayer(layerPath, exportFolderPath, string(ldBytes), string(oBytes))
if err != nil {
return errors.Wrap(err, "failed to export layer")
return fmt.Errorf("failed to export layer: %w", err)
}
return nil
}

Просмотреть файл

@ -4,9 +4,10 @@ package computestorage
import (
"context"
"fmt"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
)
@ -26,7 +27,7 @@ func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err
err = hcsFormatWritableLayerVhd(vhdHandle)
if err != nil {
return errors.Wrap(err, "failed to format writable layer vhd")
return fmt.Errorf("failed to format writable layer vhd: %w", err)
}
return nil
}

Просмотреть файл

@ -4,13 +4,14 @@ package computestorage
import (
"context"
"fmt"
"os"
"path/filepath"
"syscall"
"github.com/Microsoft/go-winio/vhd"
"github.com/Microsoft/hcsshim/internal/memory"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
"github.com/Microsoft/hcsshim/internal/security"
@ -42,23 +43,23 @@ func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVh
// differencing disks if they exist in case we're asking for a different size.
if _, err := os.Stat(hivesPath); err == nil {
if err := os.RemoveAll(hivesPath); err != nil {
return errors.Wrap(err, "failed to remove prexisting hives directory")
return fmt.Errorf("failed to remove preexisting hives directory: %w", err)
}
}
if _, err := os.Stat(layoutPath); err == nil {
if err := os.RemoveAll(layoutPath); err != nil {
return errors.Wrap(err, "failed to remove prexisting layout file")
return fmt.Errorf("failed to remove preexisting layout file: %w", err)
}
}
if _, err := os.Stat(baseVhdPath); err == nil {
if err := os.RemoveAll(baseVhdPath); err != nil {
return errors.Wrap(err, "failed to remove base vhdx path")
return fmt.Errorf("failed to remove base vhdx path: %w", err)
}
}
if _, err := os.Stat(diffVhdPath); err == nil {
if err := os.RemoveAll(diffVhdPath); err != nil {
return errors.Wrap(err, "failed to remove differencing vhdx")
return fmt.Errorf("failed to remove differencing vhdx: %w", err)
}
}
@ -71,7 +72,7 @@ func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVh
}
handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams)
if err != nil {
return errors.Wrap(err, "failed to create vhdx")
return fmt.Errorf("failed to create vhdx: %w", err)
}
defer func() {
@ -87,7 +88,7 @@ func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVh
}
// Base vhd handle must be closed before calling SetupBaseLayer in case of Container layer
if err = syscall.CloseHandle(handle); err != nil {
return errors.Wrap(err, "failed to close vhdx handle")
return fmt.Errorf("failed to close vhdx handle: %w", err)
}
options := OsLayerOptions{
@ -102,14 +103,14 @@ func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVh
// Create the differencing disk that will be what's copied for the final rw layer
// for a container.
if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil {
return errors.Wrap(err, "failed to create differencing disk")
return fmt.Errorf("failed to create differencing disk: %w", err)
}
if err = security.GrantVmGroupAccess(baseVhdPath); err != nil {
return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath)
return fmt.Errorf("failed to grant vm group access to %s: %w", baseVhdPath, err)
}
if err = security.GrantVmGroupAccess(diffVhdPath); err != nil {
return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath)
return fmt.Errorf("failed to grant vm group access to %s: %w", diffVhdPath, err)
}
return nil
}
@ -128,12 +129,12 @@ func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdP
// Remove the base and differencing disks if they exist in case we're asking for a different size.
if _, err := os.Stat(baseVhdPath); err == nil {
if err := os.RemoveAll(baseVhdPath); err != nil {
return errors.Wrap(err, "failed to remove base vhdx")
return fmt.Errorf("failed to remove base vhdx: %w", err)
}
}
if _, err := os.Stat(diffVhdPath); err == nil {
if err := os.RemoveAll(diffVhdPath); err != nil {
return errors.Wrap(err, "failed to remove differencing vhdx")
return fmt.Errorf("failed to remove differencing vhdx: %w", err)
}
}
@ -147,7 +148,7 @@ func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdP
}
handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams)
if err != nil {
return errors.Wrap(err, "failed to create vhdx")
return fmt.Errorf("failed to create vhdx: %w", err)
}
defer func() {
@ -164,7 +165,7 @@ func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdP
Version: 2,
}
if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone, attachParams); err != nil {
return errors.Wrapf(err, "failed to attach virtual disk")
return fmt.Errorf("failed to attach virtual disk: %w", err)
}
options := OsLayerOptions{
@ -177,23 +178,23 @@ func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdP
// Detach and close the handle after setting up the layer as we don't need the handle
// for anything else and we no longer need to be attached either.
if err = vhd.DetachVirtualDisk(handle); err != nil {
return errors.Wrap(err, "failed to detach vhdx")
return fmt.Errorf("failed to detach vhdx: %w", err)
}
if err = syscall.CloseHandle(handle); err != nil {
return errors.Wrap(err, "failed to close vhdx handle")
return fmt.Errorf("failed to close vhdx handle: %w", err)
}
// Create the differencing disk that will be what's copied for the final rw layer
// for a container.
if err = vhd.CreateDiffVhd(diffVhdPath, baseVhdPath, defaultVHDXBlockSizeInMB); err != nil {
return errors.Wrap(err, "failed to create differencing disk")
return fmt.Errorf("failed to create differencing disk: %w", err)
}
if err := security.GrantVmGroupAccess(baseVhdPath); err != nil {
return errors.Wrapf(err, "failed to grant vm group access to %s", baseVhdPath)
return fmt.Errorf("failed to grant vm group access to %s: %w", baseVhdPath, err)
}
if err := security.GrantVmGroupAccess(diffVhdPath); err != nil {
return errors.Wrapf(err, "failed to grant vm group access to %s", diffVhdPath)
return fmt.Errorf("failed to grant vm group access to %s: %w", diffVhdPath, err)
}
return nil
}

Просмотреть файл

@ -5,9 +5,10 @@ package computestorage
import (
"context"
"encoding/json"
"fmt"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
)
@ -37,7 +38,7 @@ func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerD
err = hcsImportLayer(layerPath, sourceFolderPath, string(bytes))
if err != nil {
return errors.Wrap(err, "failed to import layer")
return fmt.Errorf("failed to import layer: %w", err)
}
return nil
}

Просмотреть файл

@ -5,9 +5,10 @@ package computestorage
import (
"context"
"encoding/json"
"fmt"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
)
@ -34,7 +35,7 @@ func InitializeWritableLayer(ctx context.Context, layerPath string, layerData La
// Options are not used in the platform as of RS5
err = hcsInitializeWritableLayer(layerPath, string(bytes), "")
if err != nil {
return errors.Wrap(err, "failed to intitialize container layer")
return fmt.Errorf("failed to initialize container layer: %w", err)
}
return nil
}

Просмотреть файл

@ -4,10 +4,11 @@ package computestorage
import (
"context"
"fmt"
"github.com/Microsoft/hcsshim/internal/interop"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
)
@ -21,7 +22,7 @@ func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path s
var mountPath *uint16
err = hcsGetLayerVhdMountPath(vhdHandle, &mountPath)
if err != nil {
return "", errors.Wrap(err, "failed to get vhd mount path")
return "", fmt.Errorf("failed to get vhd mount path: %w", err)
}
path = interop.ConvertAndFreeCoTaskMemString(mountPath)
return path, nil

Просмотреть файл

@ -5,10 +5,12 @@ package computestorage
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/osversion"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"golang.org/x/sys/windows"
)
@ -38,7 +40,7 @@ func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.H
err = hcsSetupBaseOSLayer(layerPath, vhdHandle, string(bytes))
if err != nil {
return errors.Wrap(err, "failed to setup base OS layer")
return fmt.Errorf("failed to setup base OS layer: %w", err)
}
return nil
}
@ -74,7 +76,7 @@ func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, option
err = hcsSetupBaseOSVolume(layerPath, volumePath, string(bytes))
if err != nil {
return errors.Wrap(err, "failed to setup base OS layer")
return fmt.Errorf("failed to setup base OS layer: %w", err)
}
return nil
}

Просмотреть файл

@ -6,12 +6,11 @@ import (
"crypto/rand"
"crypto/sha256"
"encoding/binary"
"errors"
"fmt"
"io"
"os"
"github.com/pkg/errors"
"github.com/Microsoft/hcsshim/ext4/internal/compactext4"
"github.com/Microsoft/hcsshim/internal/memory"
)
@ -93,7 +92,7 @@ func MerkleTree(r io.Reader) ([]byte, error) {
if err == io.EOF {
break
}
return nil, errors.Wrap(err, "failed to read data block")
return nil, fmt.Errorf("failed to read data block: %w", err)
}
h := hash2(salt, block)
nextLevel.Write(h)
@ -116,7 +115,7 @@ func MerkleTree(r io.Reader) ([]byte, error) {
tree := bytes.NewBuffer(make([]byte, 0))
for i := len(layers) - 1; i >= 0; i-- {
if _, err := tree.Write(layers[i]); err != nil {
return nil, errors.Wrap(err, "failed to write merkle tree")
return nil, fmt.Errorf("failed to write merkle tree: %w", err)
}
}
@ -173,9 +172,9 @@ func ReadDMVerityInfo(vhdPath string, offsetInBytes int64) (*VerityInfo, error)
// Skip the ext4 data to get to dm-verity super block
if s, err := vhd.Seek(offsetInBytes, io.SeekStart); err != nil || s != offsetInBytes {
if err != nil {
return nil, errors.Wrap(err, "failed to seek dm-verity super block")
return nil, fmt.Errorf("failed to seek dm-verity super block: %w", err)
}
return nil, errors.Errorf("failed to seek dm-verity super block: expected bytes=%d, actual=%d", offsetInBytes, s)
return nil, fmt.Errorf("failed to seek dm-verity super block: expected bytes=%d, actual=%d", offsetInBytes, s)
}
return ReadDMVerityInfoReader(vhd)
@ -238,7 +237,7 @@ func ComputeAndWriteHashDevice(r io.ReadSeeker, w io.Writer) error {
tree, err := MerkleTree(r)
if err != nil {
return errors.Wrap(err, "failed to build merkle tree")
return fmt.Errorf("failed to build merkle tree: %w", err)
}
devSize, err := r.Seek(0, io.SeekEnd)
@ -253,7 +252,7 @@ func ComputeAndWriteHashDevice(r io.ReadSeeker, w io.Writer) error {
dmVeritySB := NewDMVeritySuperblock(uint64(devSize))
if err := binary.Write(w, binary.LittleEndian, dmVeritySB); err != nil {
return errors.Wrap(err, "failed to write dm-verity super-block")
return fmt.Errorf("failed to write dm-verity super-block: %w", err)
}
// write super-block padding
padding := bytes.Repeat([]byte{0}, blockSize-(sbSize%blockSize))
@ -262,7 +261,7 @@ func ComputeAndWriteHashDevice(r io.ReadSeeker, w io.Writer) error {
}
// write tree
if _, err := w.Write(tree); err != nil {
return errors.Wrap(err, "failed to write merkle tree")
return fmt.Errorf("failed to write merkle tree: %w", err)
}
return nil
}

Просмотреть файл

@ -4,6 +4,7 @@ import (
"archive/tar"
"bufio"
"encoding/binary"
"errors"
"fmt"
"io"
"os"
@ -14,7 +15,6 @@ import (
"github.com/Microsoft/hcsshim/ext4/internal/compactext4"
"github.com/Microsoft/hcsshim/ext4/internal/format"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/pkg/errors"
)
type params struct {
@ -109,7 +109,7 @@ func ConvertTarToExt4(r io.Reader, w io.ReadWriteSeeker, options ...Option) erro
}
if err = fs.MakeParents(name); err != nil {
return errors.Wrapf(err, "failed to ensure parent directories for %s", name)
return fmt.Errorf("failed to ensure parent directories for %s: %w", name, err)
}
if p.convertWhiteout {
@ -119,12 +119,12 @@ func ConvertTarToExt4(r io.Reader, w io.ReadWriteSeeker, options ...Option) erro
// Update the directory with the appropriate xattr.
f, err := fs.Stat(dir)
if err != nil {
return errors.Wrapf(err, "failed to stat parent directory of whiteout %s", file)
return fmt.Errorf("failed to stat parent directory of whiteout %s: %w", file, err)
}
f.Xattrs["trusted.overlay.opaque"] = []byte("y")
err = fs.Create(dir, f)
if err != nil {
return errors.Wrapf(err, "failed to create opaque dir %s", file)
return fmt.Errorf("failed to create opaque dir %s: %w", file, err)
}
} else {
// Create an overlay-style whiteout.
@ -135,7 +135,7 @@ func ConvertTarToExt4(r io.Reader, w io.ReadWriteSeeker, options ...Option) erro
}
err = fs.Create(path.Join(dir, file[len(whiteoutPrefix):]), f)
if err != nil {
return errors.Wrapf(err, "failed to create whiteout file for %s", file)
return fmt.Errorf("failed to create whiteout file for %s: %w", file, err)
}
}

2
go.mod
Просмотреть файл

@ -27,7 +27,6 @@ require (
github.com/opencontainers/runc v1.1.14
github.com/opencontainers/runtime-spec v1.2.0
github.com/pelletier/go-toml v1.9.5
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.9.3
github.com/urfave/cli v1.22.15
github.com/vishvananda/netlink v1.3.0
@ -91,6 +90,7 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.20.2 // indirect
github.com/prometheus/client_model v0.6.1 // indirect

Просмотреть файл

@ -3,9 +3,9 @@
package hcn
import (
"fmt"
"sync"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/log"
@ -89,7 +89,7 @@ func getSupportedFeatures() (SupportedFeatures, error) {
globals, err := GetGlobals()
if err != nil {
// It's expected if this fails once, it should always fail. It should fail on pre 1803 builds for example.
return SupportedFeatures{}, errors.Wrap(err, "failed to query HCN version number: this is expected on pre 1803 builds.")
return SupportedFeatures{}, fmt.Errorf("failed to query HCN version number: this is expected on pre 1803 builds: %w", err)
}
features.Acl = AclFeatures{
AclAddressLists: isFeatureSupported(globals.Version, HNSVersion1803),

Просмотреть файл

@ -4,6 +4,7 @@ package cmd
import (
"context"
"errors"
"fmt"
"io"
"net"
@ -16,7 +17,6 @@ import (
"github.com/Microsoft/go-winio"
"github.com/containerd/containerd/namespaces"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/log"
@ -101,7 +101,7 @@ func NewBinaryIO(ctx context.Context, id string, uri *url.URL) (_ UpstreamIO, er
select {
case err = <-errCh:
if err != nil {
return nil, errors.Wrap(err, "failed to start binary logger")
return nil, fmt.Errorf("failed to start binary logger: %w", err)
}
case <-time.After(binaryCmdStartTimeout):
return nil, errors.New("failed to start binary logger: timeout")
@ -275,7 +275,7 @@ func openNPipe(path string) (io.ReadWriteCloser, error) {
func (p *pipe) Write(b []byte) (int, error) {
p.conWg.Wait()
if p.conErr != nil {
return 0, errors.Wrap(p.conErr, "connection error")
return 0, fmt.Errorf("connection error: %w", p.conErr)
}
return p.con.Write(b)
}
@ -283,7 +283,7 @@ func (p *pipe) Write(b []byte) (int, error) {
func (p *pipe) Read(b []byte) (int, error) {
p.conWg.Wait()
if p.conErr != nil {
return 0, errors.Wrap(p.conErr, "connection error")
return 0, fmt.Errorf("connection error: %w", p.conErr)
}
return p.con.Read(b)
}

Просмотреть файл

@ -5,12 +5,12 @@ package cpugroup
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"github.com/Microsoft/hcsshim/internal/hcs"
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
"github.com/pkg/errors"
)
const NullGroupID = "00000000-0000-0000-0000-000000000000"
@ -50,7 +50,7 @@ func Create(ctx context.Context, id string, logicalProcessors []uint32) error {
LogicalProcessorCount: uint32(len(logicalProcessors)),
}
if err := modifyCPUGroupRequest(ctx, operation, details); err != nil {
return errors.Wrapf(err, "failed to make cpugroups CreateGroup request for details %+v", details)
return fmt.Errorf("failed to make cpugroups CreateGroup request for details %+v: %w", details, err)
}
return nil
}
@ -66,7 +66,7 @@ func GetCPUGroupConfig(ctx context.Context, id string) (*hcsschema.CpuGroupConfi
}
groupConfigs := &hcsschema.CpuGroupConfigurations{}
if err := json.Unmarshal(cpuGroupsPresent.Properties[0], groupConfigs); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal host cpugroups")
return nil, fmt.Errorf("failed to unmarshal host cpugroups: %w", err)
}
for _, c := range groupConfigs.CpuGroups {

Просмотреть файл

@ -12,7 +12,6 @@ import (
"github.com/Microsoft/hcsshim/internal/cmd"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/pkg/errors"
)
// AddDevice is the api exposed to oci/hcsoci to handle assigning a device on a WCOW UVM
@ -43,7 +42,7 @@ func AddDevice(ctx context.Context, vm *uvm.UtilityVM, idType, deviceID string,
if uvm.IsValidDeviceType(idType) {
vpci, err = vm.AssignDevice(ctx, deviceID, index, "")
if err != nil {
return vpci, nil, errors.Wrapf(err, "failed to assign device %s of type %s to pod %s", deviceID, idType, vm.ID())
return vpci, nil, fmt.Errorf("failed to assign device %s of type %s to pod %s: %w", deviceID, idType, vm.ID(), err)
}
vmBusInstanceID := vm.GetAssignedDeviceVMBUSInstanceID(vpci.VMBusGUID)
log.G(ctx).WithField("vmbus id", vmBusInstanceID).Info("vmbus instance ID")
@ -77,7 +76,7 @@ func getChildrenDeviceLocationPaths(ctx context.Context, vm *uvm.UtilityVM, vmBu
}
exitCode, err := cmd.ExecInUvm(ctx, vm, cmdReq)
if err != nil {
return nil, errors.Wrapf(err, "failed to find devices with exit code %d", exitCode)
return nil, fmt.Errorf("failed to find devices with exit code %d: %w", exitCode, err)
}
// wait to finish parsing stdout results

Просмотреть файл

@ -5,6 +5,7 @@ package devices
import (
"context"
"errors"
"fmt"
"io"
"net"
@ -15,7 +16,7 @@ import (
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/internal/winapi"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -53,7 +54,7 @@ func execPnPInstallDriver(ctx context.Context, vm *uvm.UtilityVM, driverDir stri
}
exitCode, err := cmd.ExecInUvm(ctx, vm, cmdReq)
if err != nil && exitCode != winapi.ERROR_NO_MORE_ITEMS {
return errors.Wrapf(err, "failed to install driver %s in uvm with exit code %d", driverDir, exitCode)
return fmt.Errorf("failed to install driver %s in uvm with exit code %d: %w", driverDir, exitCode, err)
} else if exitCode == winapi.ERROR_NO_MORE_ITEMS {
// As mentioned in `pnputilNoMoreItemsErrorMessage`, this exit code comes from pnputil
// but is not necessarily an error
@ -76,7 +77,7 @@ func readCsPipeOutput(l net.Listener, errChan chan<- error, result *[]string) {
defer close(errChan)
c, err := l.Accept()
if err != nil {
errChan <- errors.Wrapf(err, "failed to accept named pipe")
errChan <- fmt.Errorf("failed to accept named pipe: %w", err)
return
}
bytes, err := io.ReadAll(c)
@ -105,7 +106,7 @@ func readAllPipeOutput(l net.Listener, errChan chan<- error, result *string) {
defer close(errChan)
c, err := l.Accept()
if err != nil {
errChan <- errors.Wrapf(err, "failed to accept named pipe")
errChan <- fmt.Errorf("failed to accept named pipe: %w", err)
return
}
bytes, err := io.ReadAll(c)

Просмотреть файл

@ -7,6 +7,7 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"net"
@ -20,7 +21,7 @@ import (
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

Просмотреть файл

@ -13,12 +13,10 @@ import (
"io"
"math"
"os"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"go.opencensus.io/trace/tracestate"
@ -33,7 +31,7 @@ import (
// UnknownMessage represents the default handler logic for an unmatched request
// type sent from the bridge.
func UnknownMessage(r *Request) (RequestResponse, error) {
return nil, gcserr.WrapHresult(errors.Errorf("bridge: function not supported, header type: %v", r.Header.Type), gcserr.HrNotImpl)
return nil, gcserr.WrapHresult(fmt.Errorf("bridge: function not supported, header type: %v", r.Header.Type), gcserr.HrNotImpl)
}
// UnknownMessageHandler creates a default HandlerFunc out of the
@ -249,7 +247,7 @@ func (b *Bridge) ListenAndServe(bridgeIn io.ReadCloser, bridgeOut io.WriteCloser
if err == io.ErrUnexpectedEOF || err == os.ErrClosed { //nolint:errorlint
break
}
recverr = errors.Wrap(err, "bridge: failed reading message header")
recverr = fmt.Errorf("bridge: failed reading message header: %w", err)
break
}
message := make([]byte, header.Size-prot.MessageHeaderSize)
@ -257,7 +255,7 @@ func (b *Bridge) ListenAndServe(bridgeIn io.ReadCloser, bridgeOut io.WriteCloser
if err == io.ErrUnexpectedEOF || err == os.ErrClosed { //nolint:errorlint
break
}
recverr = errors.Wrap(err, "bridge: failed reading message payload")
recverr = fmt.Errorf("bridge: failed reading message payload: %w", err)
break
}
@ -373,17 +371,17 @@ func (b *Bridge) ListenAndServe(bridgeIn io.ReadCloser, bridgeOut io.WriteCloser
for resp := range b.responseChan {
responseBytes, err := json.Marshal(resp.response)
if err != nil {
resperr = errors.Wrapf(err, "bridge: failed to marshal JSON for response \"%v\"", resp.response)
resperr = fmt.Errorf("bridge: failed to marshal JSON for response \"%v\": %w", resp.response, err)
break
}
resp.header.Size = uint32(len(responseBytes) + prot.MessageHeaderSize)
if err := binary.Write(bridgeOut, binary.LittleEndian, resp.header); err != nil {
resperr = errors.Wrap(err, "bridge: failed writing message header")
resperr = fmt.Errorf("bridge: failed writing message header: %w", err)
break
}
if _, err := bridgeOut.Write(responseBytes); err != nil {
resperr = errors.Wrap(err, "bridge: failed writing message payload")
resperr = fmt.Errorf("bridge: failed writing message payload: %w", err)
break
}
@ -415,7 +413,7 @@ func (b *Bridge) ListenAndServe(bridgeIn io.ReadCloser, bridgeOut io.WriteCloser
case <-time.After(time.Second * 5):
// Timeout expired first. Close the connection to unblock the read
if cerr := bridgeIn.Close(); cerr != nil {
err = errors.Wrap(cerr, "bridge: failed to close bridgeIn")
err = fmt.Errorf("bridge: failed to close bridgeIn: %w", cerr)
}
<-requestErrChan
}
@ -455,21 +453,6 @@ func setErrorForResponseBase(response *prot.MessageResponseBase, errForResponse
// (Still keep using -1 for backwards compatibility ...)
lineNumber := uint32(math.MaxUint32)
functionName := ""
if stack := gcserr.BaseStackTrace(errForResponse); stack != nil {
bottomFrame := stack[0]
stackString = fmt.Sprintf("%+v", stack)
fileName = fmt.Sprintf("%s", bottomFrame)
lineNumberStr := fmt.Sprintf("%d", bottomFrame)
if n, err := strconv.ParseUint(lineNumberStr, 10, 32); err == nil {
lineNumber = uint32(n)
} else {
logrus.WithFields(logrus.Fields{
"line-number": lineNumberStr,
logrus.ErrorKey: err,
}).Error("opengcs::bridge::setErrorForResponseBase - failed to parse line number, using -1 instead")
}
functionName = fmt.Sprintf("%n", bottomFrame)
}
hresult, err := gcserr.GetHresult(errForResponse)
if err != nil {
// Default to using the generic failure HRESULT.

Просмотреть файл

@ -6,6 +6,8 @@ package bridge
import (
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"strings"
@ -15,7 +17,7 @@ import (
"github.com/Microsoft/hcsshim/internal/guest/gcserr"
"github.com/Microsoft/hcsshim/internal/guest/prot"
"github.com/Microsoft/hcsshim/internal/guest/transport"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -385,7 +387,7 @@ func serverSend(conn io.Writer, messageType prot.MessageIdentifier, messageID pr
var err error
body, err = json.Marshal(i)
if err != nil {
return errors.Wrap(err, "failed to json marshal to server.")
return fmt.Errorf("failed to json marshal to server: %w", err)
}
}
@ -397,11 +399,11 @@ func serverSend(conn io.Writer, messageType prot.MessageIdentifier, messageID pr
// Send the header.
if err := binary.Write(conn, binary.LittleEndian, header); err != nil {
return errors.Wrap(err, "bridge_test: failed to write message header")
return fmt.Errorf("bridge_test: failed to write message header: %w", err)
}
// Send the body.
if _, err := conn.Write(body); err != nil {
return errors.Wrap(err, "bridge_test: failed to write the message body")
return fmt.Errorf("bridge_test: failed to write the message body: %w", err)
}
return nil
}
@ -410,12 +412,12 @@ func serverRead(conn io.Reader) (*prot.MessageHeader, []byte, error) {
header := &prot.MessageHeader{}
// Read the header.
if err := binary.Read(conn, binary.LittleEndian, header); err != nil {
return nil, nil, errors.Wrap(err, "bridge_test: failed to read message header")
return nil, nil, fmt.Errorf("bridge_test: failed to read message header: %w", err)
}
message := make([]byte, header.Size-prot.MessageHeaderSize)
// Read the body.
if _, err := io.ReadFull(conn, message); err != nil {
return nil, nil, errors.Wrap(err, "bridge_test: failed to read the message body")
return nil, nil, fmt.Errorf("bridge_test: failed to read the message body: %w", err)
}
return header, message, nil

Просмотреть файл

@ -6,10 +6,11 @@ package bridge
import (
"context"
"encoding/json"
"errors"
"fmt"
"syscall"
"time"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"golang.org/x/sys/unix"
@ -53,7 +54,7 @@ func (b *Bridge) negotiateProtocolV2(r *Request) (_ RequestResponse, err error)
var request prot.NegotiateProtocol
if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message)
return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err)
}
if request.MaximumVersion < uint32(prot.PvV4) || uint32(prot.PvMax) < request.MinimumVersion {
@ -89,18 +90,16 @@ func (b *Bridge) createContainerV2(r *Request) (_ RequestResponse, err error) {
var request prot.ContainerCreate
if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message)
return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err)
}
var settingsV2 prot.VMHostedContainerSettingsV2
if err := commonutils.UnmarshalJSONWithHresult([]byte(request.ContainerConfig), &settingsV2); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal JSON for ContainerConfig \"%s\"", request.ContainerConfig)
return nil, fmt.Errorf("failed to unmarshal JSON for ContainerConfig %q: %w", request.ContainerConfig, err)
}
if settingsV2.SchemaVersion.Cmp(prot.SchemaVersion{Major: 2, Minor: 1}) < 0 {
return nil, gcserr.WrapHresult(
errors.Errorf("invalid schema version: %v", settingsV2.SchemaVersion),
gcserr.HrVmcomputeInvalidJSON)
return nil, gcserr.WrapHresult(fmt.Errorf("invalid schema version: %v", settingsV2.SchemaVersion), gcserr.HrVmcomputeInvalidJSON)
}
c, err := b.hostState.CreateContainer(ctx, request.ContainerID, &settingsV2)
@ -144,7 +143,7 @@ func (b *Bridge) startContainerV2(r *Request) (_ RequestResponse, err error) {
// returned to the HCS.
var request prot.MessageBase
if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message)
return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err)
}
return &prot.MessageResponseBase{}, nil
@ -173,14 +172,14 @@ func (b *Bridge) execProcessV2(r *Request) (_ RequestResponse, err error) {
var request prot.ContainerExecuteProcess
if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message)
return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err)
}
// The request contains a JSON string field which is equivalent to an
// ExecuteProcessInfo struct.
var params prot.ProcessParameters
if err := commonutils.UnmarshalJSONWithHresult([]byte(request.Settings.ProcessParameters), &params); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal JSON for ProcessParameters \"%s\"", request.Settings.ProcessParameters)
return nil, fmt.Errorf("failed to unmarshal JSON for ProcessParameters %q: %w", request.Settings.ProcessParameters, err)
}
var conSettings stdio.ConnectionSettings
@ -195,7 +194,6 @@ func (b *Bridge) execProcessV2(r *Request) (_ RequestResponse, err error) {
}
pid, err := b.hostState.ExecProcess(ctx, request.ContainerID, params, conSettings)
if err != nil {
return nil, err
}
@ -243,7 +241,7 @@ func (b *Bridge) signalContainerShutdownV2(ctx context.Context, span *trace.Span
var request prot.MessageBase
if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message)
return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err)
}
// If this is targeting the UVM send the request to the host itself.
@ -270,7 +268,7 @@ func (b *Bridge) signalProcessV2(r *Request) (_ RequestResponse, err error) {
var request prot.ContainerSignalProcess
if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message)
return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err)
}
span.AddAttributes(
@ -299,14 +297,14 @@ func (b *Bridge) getPropertiesV2(r *Request) (_ RequestResponse, err error) {
var request prot.ContainerGetProperties
if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message)
return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err)
}
var query prot.PropertyQuery
if len(request.Query) != 0 {
if err := json.Unmarshal([]byte(request.Query), &query); err != nil {
e := gcserr.WrapHresult(err, gcserr.HrVmcomputeInvalidJSON)
return nil, errors.Wrapf(e, "The query could not be unmarshaled: '%s'", query)
return nil, fmt.Errorf("query %q could not be unmarshaled: %w", query, e)
}
}
@ -324,7 +322,7 @@ func (b *Bridge) getPropertiesV2(r *Request) (_ RequestResponse, err error) {
var err error
propertyJSON, err = json.Marshal(properties)
if err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%+v\"", properties)
return nil, fmt.Errorf("failed to marshal JSON in message \"%+v\": %w", properties, err)
}
}
@ -341,7 +339,7 @@ func (b *Bridge) waitOnProcessV2(r *Request) (_ RequestResponse, err error) {
var request prot.ContainerWaitForProcess
if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message)
return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err)
}
span.AddAttributes(
@ -396,7 +394,7 @@ func (b *Bridge) resizeConsoleV2(r *Request) (_ RequestResponse, err error) {
var request prot.ContainerResizeConsole
if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message)
return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err)
}
span.AddAttributes(
@ -430,7 +428,7 @@ func (b *Bridge) modifySettingsV2(r *Request) (_ RequestResponse, err error) {
request, err := prot.UnmarshalContainerModifySettings(r.Message)
if err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message)
return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err)
}
err = b.hostState.ModifySettings(ctx, request.ContainerID, request.Request.(*guestrequest.ModificationRequest))
@ -464,7 +462,7 @@ func (b *Bridge) deleteContainerStateV2(r *Request) (_ RequestResponse, err erro
var request prot.MessageBase
if err := commonutils.UnmarshalJSONWithHresult(r.Message, &request); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal JSON in message \"%s\"", r.Message)
return nil, fmt.Errorf("failed to unmarshal JSON in message %q: %w", r.Message, err)
}
c, err := b.hostState.GetCreatedContainer(request.ContainerID)

Просмотреть файл

@ -1,10 +1,8 @@
package gcserr
import (
"errors"
"fmt"
"io"
"github.com/pkg/errors"
)
// Hresult is a type corresponding to the HRESULT error type used on Windows.
@ -56,39 +54,6 @@ const (
// TODO: update implementation to use go1.13 style errors with `errors.As` and co.
// StackTracer is an interface originating (but not exported) from the
// github.com/pkg/errors package. It defines something which can return a stack
// trace.
type StackTracer interface {
StackTrace() errors.StackTrace
}
// BaseStackTrace gets the earliest errors.StackTrace in the given error's cause
// stack. This will be the stack trace which reaches closest to the error's
// actual origin. It returns nil if no stack trace is found in the cause stack.
func BaseStackTrace(e error) errors.StackTrace {
type causer interface {
Cause() error
}
cause := e
var tracer StackTracer
for cause != nil {
serr, ok := cause.(StackTracer) //nolint:errorlint
if ok {
tracer = serr
}
cerr, ok := cause.(causer) //nolint:errorlint
if !ok {
break
}
cause = cerr.Cause()
}
if tracer == nil {
return nil
}
return tracer.StackTrace()
}
type baseHresultError struct {
hresult Hresult
}
@ -96,6 +61,7 @@ type baseHresultError struct {
func (e *baseHresultError) Error() string {
return fmt.Sprintf("HRESULT: 0x%x", uint32(e.Hresult()))
}
func (e *baseHresultError) Hresult() Hresult {
return e.hresult
}
@ -106,38 +72,16 @@ type wrappingHresultError struct {
}
func (e *wrappingHresultError) Error() string {
return fmt.Sprintf("HRESULT 0x%x", uint32(e.Hresult())) + ": " + e.Cause().Error()
return fmt.Sprintf("HRESULT 0x%x", uint32(e.Hresult())) + ": " + e.Unwrap().Error()
}
func (e *wrappingHresultError) Hresult() Hresult {
return e.hresult
}
func (e *wrappingHresultError) Cause() error {
func (e *wrappingHresultError) Unwrap() error {
return e.cause
}
func (e *wrappingHresultError) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
if s.Flag('+') {
fmt.Fprintf(s, "%+v\n", e.Cause())
return
}
fallthrough
case 's':
_, _ = io.WriteString(s, e.Error())
case 'q':
fmt.Fprintf(s, "%q", e.Error())
}
}
func (e *wrappingHresultError) StackTrace() errors.StackTrace {
type stackTracer interface {
StackTrace() errors.StackTrace
}
serr, ok := e.Cause().(stackTracer) //nolint:errorlint
if !ok {
return nil
}
return serr.StackTrace()
}
// NewHresultError produces a new error with the given HRESULT.
func NewHresultError(hresult Hresult) error {
@ -146,6 +90,8 @@ func NewHresultError(hresult Hresult) error {
// WrapHresult produces a new error with the given HRESULT and wrapping the
// given error.
//
// Deprecated: use [fmt.Errorf] with %w and [NewHresultError] instead.
func WrapHresult(e error, hresult Hresult) error {
return &wrappingHresultError{
cause: e,
@ -153,29 +99,14 @@ func WrapHresult(e error, hresult Hresult) error {
}
}
// GetHresult iterates through the error's cause stack (similar to how the
// Cause function in github.com/pkg/errors operates). At the first error it
// encounters which implements the Hresult() method, it return's that error's
// HRESULT. This allows errors higher up in the cause stack to shadow the
// HRESULTs of errors lower down.
// GetHresult returns the topmost HRESULT of an error, if possible, or an error.
func GetHresult(e error) (Hresult, error) {
type hresulter interface {
Hresult() Hresult
}
type causer interface {
Cause() error
var herr hresulter
if errors.As(e, &herr) {
return herr.Hresult(), nil
}
cause := e
for cause != nil {
herr, ok := cause.(hresulter) //nolint:errorlint
if ok {
return herr.Hresult(), nil
}
cerr, ok := cause.(causer) //nolint:errorlint
if !ok {
break
}
cause = cerr.Cause()
}
return -1, errors.Errorf("no HRESULT found in cause stack for error %s", e)
return -1, fmt.Errorf("no HRESULT found in stack for error %s", e)
}

Просмотреть файл

@ -5,6 +5,7 @@ package network
import (
"context"
"errors"
"fmt"
"net"
"os/exec"
@ -14,7 +15,7 @@ import (
"github.com/Microsoft/hcsshim/internal/guest/prot"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"github.com/vishvananda/netns"
@ -26,15 +27,15 @@ func MoveInterfaceToNS(ifStr string, pid int) error {
// Get a reference to the interface and make sure it's down
link, err := netlink.LinkByName(ifStr)
if err != nil {
return errors.Wrapf(err, "netlink.LinkByName(%s) failed", ifStr)
return fmt.Errorf("netlink.LinkByName(%s) failed: %w", ifStr, err)
}
if err := netlink.LinkSetDown(link); err != nil {
return errors.Wrapf(err, "netlink.LinkSetDown(%#v) failed", link)
return fmt.Errorf("netlink.LinkSetDown(%#v) failed: %w", link, err)
}
// Move the interface to the new network namespace
if err := netlink.LinkSetNsPid(link, pid); err != nil {
return errors.Wrapf(err, "netlink.SetNsPid(%#v, %d) failed", link, pid)
return fmt.Errorf("netlink.SetNsPid(%#v, %d) failed: %w", link, pid, err)
}
return nil
}
@ -49,12 +50,12 @@ func DoInNetNS(ns netns.NsHandle, run func() error) error {
origNs, err := netns.Get()
if err != nil {
return errors.Wrap(err, "failed to get current network namespace")
return fmt.Errorf("failed to get current network namespace: %w", err)
}
defer origNs.Close()
if err := netns.Set(ns); err != nil {
return errors.Wrapf(err, "failed to set network namespace to %v", ns)
return fmt.Errorf("failed to set network namespace to %v: %w", ns, err)
}
// Defer so we can re-enter the threads original netns on exit.
defer netns.Set(origNs) //nolint:errcheck
@ -79,7 +80,7 @@ func NetNSConfig(ctx context.Context, ifStr string, nsPid int, adapter *prot.Net
entry.Trace("Obtaining current namespace")
ns, err := netns.Get()
if err != nil {
return errors.Wrap(err, "netns.Get() failed")
return fmt.Errorf("netns.Get() failed: %w", err)
}
defer ns.Close()
entry.WithField("namespace", ns).Debug("New network namespace from PID")
@ -88,7 +89,7 @@ func NetNSConfig(ctx context.Context, ifStr string, nsPid int, adapter *prot.Net
entry.Trace("Getting reference to interface")
link, err := netlink.LinkByName(ifStr)
if err != nil {
return errors.Wrapf(err, "netlink.LinkByName(%s) failed", ifStr)
return fmt.Errorf("netlink.LinkByName(%s) failed: %w", ifStr, err)
}
// User requested non-default MTU size
@ -96,7 +97,7 @@ func NetNSConfig(ctx context.Context, ifStr string, nsPid int, adapter *prot.Net
mtu := link.Attrs().MTU - int(adapter.EncapOverhead)
entry.WithField("mtu", mtu).Debug("EncapOverhead non-zero, will set MTU")
if err = netlink.LinkSetMTU(link, mtu); err != nil {
return errors.Wrapf(err, "netlink.LinkSetMTU(%#v, %d) failed", link, mtu)
return fmt.Errorf("netlink.LinkSetMTU(%#v, %d) failed: %w", link, mtu, err)
}
}
@ -112,7 +113,7 @@ func NetNSConfig(ctx context.Context, ifStr string, nsPid int, adapter *prot.Net
// Bring the interface up
if err := netlink.LinkSetUp(link); err != nil {
return errors.Wrapf(err, "netlink.LinkSetUp(%#v) failed", link)
return fmt.Errorf("netlink.LinkSetUp(%#v) failed: %w", link, err)
}
if err := assignIPToLink(ctx, ifStr, nsPid, link,
adapter.AllocatedIPAddress, adapter.HostIPAddress, adapter.HostIPPrefixLength,
@ -156,7 +157,7 @@ func NetNSConfig(ctx context.Context, ifStr string, nsPid int, adapter *prot.Net
}
if err != nil {
entry.WithError(err).Debugf("udhcpc failed [%s]", cos)
return errors.Wrapf(err, "process failed (%s)", cos)
return fmt.Errorf("process failed (%s): %w", cos, err)
}
}
var cos string
@ -210,7 +211,7 @@ func assignIPToLink(ctx context.Context,
// Set IP address
ip, addr, err := net.ParseCIDR(allocatedIP + "/" + strconv.FormatUint(uint64(prefixLen), 10))
if err != nil {
return errors.Wrapf(err, "parsing address %s/%d failed", allocatedIP, prefixLen)
return fmt.Errorf("parsing address %s/%d failed: %w", allocatedIP, prefixLen, err)
}
// the IP address field in addr is masked, so replace it with the original ip address
addr.IP = ip
@ -220,7 +221,7 @@ func assignIPToLink(ctx context.Context,
}).Debugf("parsed ip address %s/%d", allocatedIP, prefixLen)
ipAddr := &netlink.Addr{IPNet: addr, Label: ""}
if err := netlink.AddrAdd(link, ipAddr); err != nil {
return errors.Wrapf(err, "netlink.AddrAdd(%#v, %#v) failed", link, ipAddr)
return fmt.Errorf("netlink.AddrAdd(%#v, %#v) failed: %w", link, ipAddr, err)
}
if gatewayIP == "" {
return nil
@ -228,7 +229,7 @@ func assignIPToLink(ctx context.Context,
// Set gateway
gw := net.ParseIP(gatewayIP)
if gw == nil {
return errors.Wrapf(err, "parsing gateway address %s failed", gatewayIP)
return fmt.Errorf("parsing gateway address %s failed: %w", gatewayIP, err)
}
if !addr.Contains(gw) {
@ -243,7 +244,7 @@ func assignIPToLink(ctx context.Context,
Mask: net.CIDRMask(ml, ml)}
ipAddr2 := &netlink.Addr{IPNet: addr2, Label: ""}
if err := netlink.AddrAdd(link, ipAddr2); err != nil {
return errors.Wrapf(err, "netlink.AddrAdd(%#v, %#v) failed", link, ipAddr2)
return fmt.Errorf("netlink.AddrAdd(%#v, %#v) failed: %w", link, ipAddr2, err)
}
}
@ -262,7 +263,7 @@ func assignIPToLink(ctx context.Context,
rule.Priority = 5
if err := netlink.RuleAdd(rule); err != nil {
return errors.Wrapf(err, "netlink.RuleAdd(%#v) failed", rule)
return fmt.Errorf("netlink.RuleAdd(%#v) failed: %w", rule, err)
}
table = rule.Table
}
@ -275,7 +276,7 @@ func assignIPToLink(ctx context.Context,
Priority: metric,
}
if err := netlink.RouteAdd(&route); err != nil {
return errors.Wrapf(err, "netlink.RouteAdd(%#v) failed", route)
return fmt.Errorf("netlink.RouteAdd(%#v) failed: %w", route, err)
}
return nil
}

Просмотреть файл

@ -17,7 +17,7 @@ import (
"github.com/Microsoft/hcsshim/internal/guest/storage/vmbus"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
)
@ -69,7 +69,7 @@ func GenerateResolvConfContent(ctx context.Context, searches, servers, options [
trace.StringAttribute("options", strings.Join(options, ", ")))
if len(searches) > maxDNSSearches {
return "", errors.Errorf("searches has more than %d domains", maxDNSSearches)
return "", fmt.Errorf("searches has more than %d domains", maxDNSSearches)
}
content := ""
@ -136,7 +136,7 @@ func InstanceIDToName(ctx context.Context, id string, vpciAssigned bool) (_ stri
netDevicePath, err = vmbusWaitForDevicePath(ctx, vmBusNetSubPath)
}
if err != nil {
return "", errors.Wrapf(err, "failed to find adapter %v sysfs path", vmBusID)
return "", fmt.Errorf("failed to find adapter %v sysfs path: %w", vmBusID, err)
}
var deviceDirs []os.DirEntry
@ -146,22 +146,22 @@ func InstanceIDToName(ctx context.Context, id string, vpciAssigned bool) (_ stri
if os.IsNotExist(err) {
select {
case <-ctx.Done():
return "", errors.Wrap(ctx.Err(), "timed out waiting for net adapter")
return "", fmt.Errorf("timed out waiting for net adapter: %w", ctx.Err())
default:
time.Sleep(10 * time.Millisecond)
continue
}
} else {
return "", errors.Wrapf(err, "failed to read vmbus network device from /sys filesystem for adapter %s", vmBusID)
return "", fmt.Errorf("failed to read vmbus network device from /sys filesystem for adapter %s: %w", vmBusID, err)
}
}
break
}
if len(deviceDirs) == 0 {
return "", errors.Errorf("no interface name found for adapter %s", vmBusID)
return "", fmt.Errorf("no interface name found for adapter %s", vmBusID)
}
if len(deviceDirs) > 1 {
return "", errors.Errorf("multiple interface names found for adapter %s", vmBusID)
return "", fmt.Errorf("multiple interface names found for adapter %s", vmBusID)
}
ifname := deviceDirs[0].Name()
log.G(ctx).WithField("ifname", ifname).Debug("resolved ifname")

Просмотреть файл

@ -5,11 +5,11 @@ package prot
import (
"encoding/json"
"fmt"
"strconv"
v1 "github.com/containerd/cgroups/v3/cgroup1/stats"
oci "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/Microsoft/hcsshim/internal/guest/commonutils"
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
@ -518,14 +518,14 @@ func UnmarshalContainerModifySettings(b []byte) (*ContainerModifySettings, error
var requestRawSettings json.RawMessage
request.Request = &requestRawSettings
if err := commonutils.UnmarshalJSONWithHresult(b, &request); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal ContainerModifySettings")
return nil, fmt.Errorf("failed to unmarshal ContainerModifySettings: %w", err)
}
var msr guestrequest.ModificationRequest
var msrRawSettings json.RawMessage
msr.Settings = &msrRawSettings
if err := commonutils.UnmarshalJSONWithHresult(requestRawSettings, &msr); err != nil {
return &request, errors.Wrap(err, "failed to unmarshal request.Settings as ModifySettingRequest")
return &request, fmt.Errorf("failed to unmarshal request.Settings as ModifySettingRequest: %w", err)
}
if msr.RequestType == "" {
@ -537,65 +537,65 @@ func UnmarshalContainerModifySettings(b []byte) (*ContainerModifySettings, error
case guestresource.ResourceTypeSCSIDevice:
msd := &guestresource.SCSIDevice{}
if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, msd); err != nil {
return &request, errors.Wrap(err, "failed to unmarshal settings as SCSIDevice")
return &request, fmt.Errorf("failed to unmarshal settings as SCSIDevice: %w", err)
}
msr.Settings = msd
case guestresource.ResourceTypeMappedVirtualDisk:
mvd := &guestresource.LCOWMappedVirtualDisk{}
if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, mvd); err != nil {
return &request, errors.Wrap(err, "failed to unmarshal settings as MappedVirtualDiskV2")
return &request, fmt.Errorf("failed to unmarshal settings as MappedVirtualDiskV2: %w", err)
}
msr.Settings = mvd
case guestresource.ResourceTypeMappedDirectory:
md := &guestresource.LCOWMappedDirectory{}
if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, md); err != nil {
return &request, errors.Wrap(err, "failed to unmarshal settings as MappedDirectoryV2")
return &request, fmt.Errorf("failed to unmarshal settings as MappedDirectoryV2: %w", err)
}
msr.Settings = md
case guestresource.ResourceTypeVPMemDevice:
vpd := &guestresource.LCOWMappedVPMemDevice{}
if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, vpd); err != nil {
return &request, errors.Wrap(err, "failed to unmarshal hosted settings as MappedVPMemDeviceV2")
return &request, fmt.Errorf("failed to unmarshal hosted settings as MappedVPMemDeviceV2: %w", err)
}
msr.Settings = vpd
case guestresource.ResourceTypeCombinedLayers:
cl := &guestresource.LCOWCombinedLayers{}
if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, cl); err != nil {
return &request, errors.Wrap(err, "failed to unmarshal settings as CombinedLayersV2")
return &request, fmt.Errorf("failed to unmarshal settings as CombinedLayersV2: %w", err)
}
msr.Settings = cl
case guestresource.ResourceTypeNetwork:
na := &guestresource.LCOWNetworkAdapter{}
if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, na); err != nil {
return &request, errors.Wrap(err, "failed to unmarshal settings as NetworkAdapterV2")
return &request, fmt.Errorf("failed to unmarshal settings as NetworkAdapterV2: %w", err)
}
msr.Settings = na
case guestresource.ResourceTypeVPCIDevice:
vd := &guestresource.LCOWMappedVPCIDevice{}
if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, vd); err != nil {
return &request, errors.Wrap(err, "failed to unmarshal settings as MappedVPCIDeviceV2")
return &request, fmt.Errorf("failed to unmarshal settings as MappedVPCIDeviceV2: %w", err)
}
msr.Settings = vd
case guestresource.ResourceTypeContainerConstraints:
cc := &guestresource.LCOWContainerConstraints{}
if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, cc); err != nil {
return &request, errors.Wrap(err, "failed to unmarshal settings as ContainerConstraintsV2")
return &request, fmt.Errorf("failed to unmarshal settings as ContainerConstraintsV2: %w", err)
}
msr.Settings = cc
case guestresource.ResourceTypeSecurityPolicy:
enforcer := &guestresource.LCOWConfidentialOptions{}
if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, enforcer); err != nil {
return &request, errors.Wrap(err, "failed to unmarshal settings as LCOWConfidentialOptions")
return &request, fmt.Errorf("failed to unmarshal settings as LCOWConfidentialOptions: %w", err)
}
msr.Settings = enforcer
case guestresource.ResourceTypePolicyFragment:
fragment := &guestresource.LCOWSecurityPolicyFragment{}
if err := commonutils.UnmarshalJSONWithHresult(msrRawSettings, fragment); err != nil {
return &request, errors.Wrap(err, "failed to unmarshal settings as LCOWSecurityPolicyFragment")
return &request, fmt.Errorf("failed to unmarshal settings as LCOWSecurityPolicyFragment: %w", err)
}
msr.Settings = fragment
default:
return &request, errors.Errorf("invalid ResourceType '%s'", msr.ResourceType)
return &request, fmt.Errorf("invalid ResourceType %q", msr.ResourceType)
}
request.Request = &msr
return &request, nil

Просмотреть файл

@ -14,7 +14,7 @@ import (
cgroups "github.com/containerd/cgroups/v3/cgroup1"
v1 "github.com/containerd/cgroups/v3/cgroup1/stats"
oci "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
@ -267,7 +267,7 @@ func (c *Container) GetStats(ctx context.Context) (*v1.Metrics, error) {
cgroupPath := c.spec.Linux.CgroupsPath
cg, err := cgroups.Load(cgroups.StaticPath(cgroupPath))
if err != nil {
return nil, errors.Errorf("failed to get container stats for %v: %v", c.id, err)
return nil, fmt.Errorf("failed to get container stats for %v: %v", c.id, err)
}
return cg.Stat(cgroups.IgnoreNotExist)

Просмотреть файл

@ -10,7 +10,6 @@ import (
"sync"
"time"
"github.com/pkg/errors"
"github.com/vishvananda/netns"
"go.opencensus.io/trace"
@ -46,7 +45,7 @@ func getNetworkNamespace(id string) (*namespace, error) {
ns, ok := namespaces[id]
if !ok {
return nil, gcserr.WrapHresult(errors.Errorf("namespace '%s' not found", id), gcserr.HrErrNotFound)
return nil, gcserr.WrapHresult(fmt.Errorf("namespace %q not found", id), gcserr.HrErrNotFound)
}
return ns, nil
}
@ -86,7 +85,7 @@ func RemoveNetworkNamespace(ctx context.Context, id string) (err error) {
ns.m.Lock()
defer ns.m.Unlock()
if len(ns.nics) > 0 {
return errors.Errorf("network namespace '%s' contains adapters", id)
return fmt.Errorf("network namespace %q contains adapters", id)
}
delete(namespaces, id)
}
@ -123,7 +122,7 @@ func (n *namespace) AssignContainerPid(ctx context.Context, pid int) (err error)
defer n.m.Unlock()
if n.pid != 0 {
return errors.Errorf("previously assigned container pid %d to network namespace %q", n.pid, n.id)
return fmt.Errorf("previously assigned container pid %d to network namespace %q", n.pid, n.id)
}
n.pid = pid
@ -159,7 +158,7 @@ func (n *namespace) AddAdapter(ctx context.Context, adp *guestresource.LCOWNetwo
for _, nic := range n.nics {
if strings.EqualFold(nic.adapter.ID, adp.ID) {
return errors.Errorf("adapter with id: '%s' already present in namespace", adp.ID)
return fmt.Errorf("adapter with id: %q already present in namespace", adp.ID)
}
}
@ -265,13 +264,13 @@ func (nin *nicInNamespace) assignToPid(ctx context.Context, pid int) (err error)
}
if err := network.MoveInterfaceToNS(nin.ifname, pid); err != nil {
return errors.Wrapf(err, "failed to move interface %s to network namespace", nin.ifname)
return fmt.Errorf("failed to move interface %s to network namespace: %w", nin.ifname, err)
}
// Get a reference to the new network namespace
ns, err := netns.GetFromPid(pid)
if err != nil {
return errors.Wrapf(err, "netns.GetFromPid(%d) failed", pid)
return fmt.Errorf("netns.GetFromPid(%d) failed: %w", pid, err)
}
defer ns.Close()
@ -280,7 +279,7 @@ func (nin *nicInNamespace) assignToPid(ctx context.Context, pid int) (err error)
}
if err := network.DoInNetNS(ns, netNSCfg); err != nil {
return errors.Wrapf(err, "failed to configure adapter aid: %s, if id: %s", nin.adapter.ID, nin.ifname)
return fmt.Errorf("failed to configure adapter aid: %s, if id: %s: %w", nin.adapter.ID, nin.ifname, err)
}
nin.assignedPid = pid
return nil

Просмотреть файл

@ -12,7 +12,6 @@ import (
"strings"
oci "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/Microsoft/hcsshim/cmd/gcstools/generichook"
"github.com/Microsoft/hcsshim/internal/guest/storage/pci"
@ -29,7 +28,7 @@ func addNvidiaDeviceHook(ctx context.Context, spec *oci.Spec, ociBundlePath stri
genericHookBinary := "generichook"
genericHookPath, err := exec.LookPath(genericHookBinary)
if err != nil {
return errors.Wrapf(err, "failed to find %s for container device support", genericHookBinary)
return fmt.Errorf("failed to find %s for container device support: %w", genericHookBinary, err)
}
toolDebugPath := filepath.Join(ociBundlePath, nvidiaDebugFilePath)
@ -54,7 +53,7 @@ func addNvidiaDeviceHook(ctx context.Context, spec *oci.Spec, ociBundlePath stri
case "gpu":
busLocation, err := pci.FindDeviceBusLocationFromVMBusGUID(ctx, d.ID)
if err != nil {
return errors.Wrapf(err, "failed to find nvidia gpu bus location")
return fmt.Errorf("failed to find nvidia gpu bus location: %w", err)
}
args = append(args, fmt.Sprintf("--device=%s", busLocation))
}

Просмотреть файл

@ -5,6 +5,7 @@ package hcsv2
import (
"context"
"errors"
"fmt"
"os/exec"
"sync"
@ -17,7 +18,7 @@ import (
"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/oc"
oci "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@ -233,7 +234,7 @@ func newExternalProcess(ctx context.Context, cmd *exec.Cmd, tty *stdio.TtyRelay,
remove: onRemove,
}
if err := cmd.Start(); err != nil {
return nil, errors.Wrap(err, "failed to call Start for external process")
return nil, fmt.Errorf("failed to call Start for external process: %w", err)
}
if tty != nil {
tty.Start()

Просмотреть файл

@ -5,12 +5,13 @@ package hcsv2
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
oci "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"github.com/Microsoft/hcsshim/internal/guest/network"
@ -40,7 +41,7 @@ func setupSandboxContainerSpec(ctx context.Context, id string, spec *oci.Spec) (
// Generate the sandbox root dir
rootDir := specInternal.SandboxRootDir(id)
if err := os.MkdirAll(rootDir, 0755); err != nil {
return errors.Wrapf(err, "failed to create sandbox root directory %q", rootDir)
return fmt.Errorf("failed to create sandbox root directory %q: %w", rootDir, err)
}
defer func() {
if err != nil {
@ -54,20 +55,20 @@ func setupSandboxContainerSpec(ctx context.Context, id string, spec *oci.Spec) (
var err error
hostname, err = os.Hostname()
if err != nil {
return errors.Wrap(err, "failed to get hostname")
return fmt.Errorf("failed to get hostname: %w", err)
}
}
sandboxHostnamePath := getSandboxHostnamePath(id)
if err := os.WriteFile(sandboxHostnamePath, []byte(hostname+"\n"), 0644); err != nil {
return errors.Wrapf(err, "failed to write hostname to %q", sandboxHostnamePath)
return fmt.Errorf("failed to write hostname to %q: %w", sandboxHostnamePath, err)
}
// Write the hosts
sandboxHostsContent := network.GenerateEtcHostsContent(ctx, hostname)
sandboxHostsPath := getSandboxHostsPath(id)
if err := os.WriteFile(sandboxHostsPath, []byte(sandboxHostsContent), 0644); err != nil {
return errors.Wrapf(err, "failed to write sandbox hosts to %q", sandboxHostsPath)
return fmt.Errorf("failed to write sandbox hosts to %q: %w", sandboxHostsPath, err)
}
// Write resolv.conf
@ -86,11 +87,11 @@ func setupSandboxContainerSpec(ctx context.Context, id string, spec *oci.Spec) (
}
resolvContent, err := network.GenerateResolvConfContent(ctx, searches, servers, nil)
if err != nil {
return errors.Wrap(err, "failed to generate sandbox resolv.conf content")
return fmt.Errorf("failed to generate sandbox resolv.conf content: %w", err)
}
sandboxResolvPath := getSandboxResolvPath(id)
if err := os.WriteFile(sandboxResolvPath, []byte(resolvContent), 0644); err != nil {
return errors.Wrap(err, "failed to write sandbox resolv.conf")
return fmt.Errorf("failed to write sandbox resolv.conf: %w", err)
}
// User.Username is generally only used on Windows, but as there's no (easy/fast at least) way to grab

Просмотреть файл

@ -5,6 +5,7 @@ package hcsv2
import (
"context"
"errors"
"fmt"
"math"
"path/filepath"
@ -15,7 +16,6 @@ import (
"github.com/opencontainers/runc/libcontainer/devices"
"github.com/opencontainers/runc/libcontainer/user"
oci "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/pkg/annotations"
@ -71,11 +71,11 @@ func setCoreRLimit(spec *oci.Spec, value string) error {
soft, err := strconv.ParseUint(vals[0], 10, 64)
if err != nil {
return errors.Wrap(err, "failed to parse soft core rlimit")
return fmt.Errorf("failed to parse soft core rlimit: %w", err)
}
hard, err := strconv.ParseUint(vals[1], 10, 64)
if err != nil {
return errors.Wrap(err, "failed to parse hard core rlimit")
return fmt.Errorf("failed to parse hard core rlimit: %w", err)
}
spec.Process.Rlimits = append(spec.Process.Rlimits, oci.POSIXRlimit{
@ -117,7 +117,7 @@ func setUserStr(spec *oci.Spec, userstr string) error {
return setUsername(spec, userstr)
}
if outOfUint32Bounds(v) {
return errors.Errorf("UID (%d) exceeds uint32 bounds", v)
return fmt.Errorf("UID (%d) exceeds uint32 bounds", v)
}
return setUserID(spec, uint32(v))
case 2:
@ -131,7 +131,7 @@ func setUserStr(spec *oci.Spec, userstr string) error {
username = parts[0]
} else {
if outOfUint32Bounds(v) {
return errors.Errorf("UID (%d) exceeds uint32 bounds", v)
return fmt.Errorf("UID (%d) exceeds uint32 bounds", v)
}
uid = uint32(v)
}
@ -141,7 +141,7 @@ func setUserStr(spec *oci.Spec, userstr string) error {
groupname = parts[1]
} else {
if outOfUint32Bounds(v) {
return errors.Errorf("GID (%d) for user %q exceeds uint32 bounds", v, parts[0])
return fmt.Errorf("GID (%d) for user %q exceeds uint32 bounds", v, parts[0])
}
gid = uint32(v)
}
@ -151,11 +151,11 @@ func setUserStr(spec *oci.Spec, userstr string) error {
return u.Name == username
})
if err != nil {
return errors.Wrapf(err, "failed to find user by username: %s", username)
return fmt.Errorf("failed to find user by username: %s: %w", username, err)
}
if outOfUint32Bounds(u.Uid) {
return errors.Errorf("UID (%d) for username %q exceeds uint32 bounds", u.Uid, username)
return fmt.Errorf("UID (%d) for username %q exceeds uint32 bounds", u.Uid, username)
}
uid = uint32(u.Uid)
}
@ -164,11 +164,11 @@ func setUserStr(spec *oci.Spec, userstr string) error {
return g.Name == groupname
})
if err != nil {
return errors.Wrapf(err, "failed to find group by groupname: %s", groupname)
return fmt.Errorf("failed to find group by groupname: %s: %w", groupname, err)
}
if outOfUint32Bounds(g.Gid) {
return errors.Errorf("GID (%d) for groupname %q exceeds uint32 bounds", g.Gid, groupname)
return fmt.Errorf("GID (%d) for groupname %q exceeds uint32 bounds", g.Gid, groupname)
}
gid = uint32(g.Gid)
}
@ -185,13 +185,13 @@ func setUsername(spec *oci.Spec, username string) error {
return u.Name == username
})
if err != nil {
return errors.Wrapf(err, "failed to find user by username: %s", username)
return fmt.Errorf("failed to find user by username: %s: %w", username, err)
}
if outOfUint32Bounds(u.Uid) {
return errors.Errorf("UID (%d) for username %q exceeds uint32 bounds", u.Uid, username)
return fmt.Errorf("UID (%d) for username %q exceeds uint32 bounds", u.Uid, username)
}
if outOfUint32Bounds(u.Gid) {
return errors.Errorf("GID (%d) for username %q exceeds uint32 bounds", u.Gid, username)
return fmt.Errorf("GID (%d) for username %q exceeds uint32 bounds", u.Gid, username)
}
spec.Process.User.UID, spec.Process.User.GID = uint32(u.Uid), uint32(u.Gid)
return nil
@ -207,7 +207,7 @@ func setUserID(spec *oci.Spec, uid uint32) error {
}
if outOfUint32Bounds(u.Gid) {
return errors.Errorf("GID (%d) for UID %d exceeds uint32 bounds", u.Gid, uid)
return fmt.Errorf("GID (%d) for UID %d exceeds uint32 bounds", u.Gid, uid)
}
spec.Process.User.UID, spec.Process.User.GID = uid, uint32(u.Gid)
return nil
@ -219,7 +219,7 @@ func getUser(spec *oci.Spec, filter func(user.User) bool) (user.User, error) {
return user.User{}, err
}
if len(users) != 1 {
return user.User{}, errors.Errorf("expected exactly 1 user matched '%d'", len(users))
return user.User{}, fmt.Errorf("expected exactly 1 user matched '%d'", len(users))
}
return users[0], nil
}
@ -230,7 +230,7 @@ func getGroup(spec *oci.Spec, filter func(user.Group) bool) (user.Group, error)
return user.Group{}, err
}
if len(groups) != 1 {
return user.Group{}, errors.Errorf("expected exactly 1 group matched '%d'", len(groups))
return user.Group{}, fmt.Errorf("expected exactly 1 group matched '%d'", len(groups))
}
return groups[0], nil
}

Просмотреть файл

@ -5,6 +5,7 @@ package hcsv2
import (
"context"
"errors"
"fmt"
"path/filepath"
"strings"
@ -14,7 +15,6 @@ import (
"github.com/Microsoft/hcsshim/internal/log"
"github.com/opencontainers/runc/libcontainer/devices"
oci "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
const (
@ -42,12 +42,12 @@ func addAssignedDevice(ctx context.Context, spec *oci.Spec) error {
// validate that the device is available
fullPCIPath, err := pci.FindDeviceFullPath(ctx, d.ID)
if err != nil {
return errors.Wrapf(err, "failed to find device pci path for device %v", d)
return fmt.Errorf("failed to find device pci path for device %v: %w", d, err)
}
// find the device nodes that link to the pci path we just got
devs, err := devicePathsFromPCIPath(ctx, fullPCIPath)
if err != nil {
return errors.Wrapf(err, "failed to find dev node for device %v", d)
return fmt.Errorf("failed to find dev node for device %v: %w", d, err)
}
for _, dev := range devs {
addLinuxDeviceToSpec(ctx, dev, spec, true)

Просмотреть файл

@ -5,12 +5,13 @@ package hcsv2
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
oci "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"github.com/Microsoft/hcsshim/internal/guest/network"
@ -44,7 +45,7 @@ func setupStandaloneContainerSpec(ctx context.Context, id string, spec *oci.Spec
// Generate the standalone root dir
rootDir := getStandaloneRootDir(id)
if err := os.MkdirAll(rootDir, 0755); err != nil {
return errors.Wrapf(err, "failed to create container root directory %q", rootDir)
return fmt.Errorf("failed to create container root directory %q: %w", rootDir, err)
}
defer func() {
if err != nil {
@ -57,7 +58,7 @@ func setupStandaloneContainerSpec(ctx context.Context, id string, spec *oci.Spec
var err error
hostname, err = os.Hostname()
if err != nil {
return errors.Wrap(err, "failed to get hostname")
return fmt.Errorf("failed to get hostname: %w", err)
}
}
@ -65,7 +66,7 @@ func setupStandaloneContainerSpec(ctx context.Context, id string, spec *oci.Spec
if !specInternal.MountPresent("/etc/hostname", spec.Mounts) {
standaloneHostnamePath := getStandaloneHostnamePath(id)
if err := os.WriteFile(standaloneHostnamePath, []byte(hostname+"\n"), 0644); err != nil {
return errors.Wrapf(err, "failed to write hostname to %q", standaloneHostnamePath)
return fmt.Errorf("failed to write hostname to %q: %w", standaloneHostnamePath, err)
}
mt := oci.Mount{
@ -85,7 +86,7 @@ func setupStandaloneContainerSpec(ctx context.Context, id string, spec *oci.Spec
standaloneHostsContent := network.GenerateEtcHostsContent(ctx, hostname)
standaloneHostsPath := getStandaloneHostsPath(id)
if err := os.WriteFile(standaloneHostsPath, []byte(standaloneHostsContent), 0644); err != nil {
return errors.Wrapf(err, "failed to write standalone hosts to %q", standaloneHostsPath)
return fmt.Errorf("failed to write standalone hosts to %q: %w", standaloneHostsPath, err)
}
mt := oci.Mount{
@ -114,11 +115,11 @@ func setupStandaloneContainerSpec(ctx context.Context, id string, spec *oci.Spec
}
resolvContent, err := network.GenerateResolvConfContent(ctx, searches, servers, nil)
if err != nil {
return errors.Wrap(err, "failed to generate standalone resolv.conf content")
return fmt.Errorf("failed to generate standalone resolv.conf content: %w", err)
}
standaloneResolvPath := getStandaloneResolvPath(id)
if err := os.WriteFile(standaloneResolvPath, []byte(resolvContent), 0644); err != nil {
return errors.Wrap(err, "failed to write standalone resolv.conf")
return fmt.Errorf("failed to write standalone resolv.conf: %w", err)
}
mt := oci.Mount{

Просмотреть файл

@ -9,6 +9,7 @@ import (
"crypto/sha256"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"os"
@ -45,7 +46,7 @@ import (
"github.com/Microsoft/hcsshim/pkg/securitypolicy"
"github.com/mattn/go-shellwords"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@ -178,7 +179,7 @@ func (h *Host) InjectFragment(ctx context.Context, fragment *guestresource.LCOWS
sha.Write(blob)
timestamp := time.Now()
fragmentPath := fmt.Sprintf("fragment-%x-%d.blob", sha.Sum(nil), timestamp.UnixMilli())
_ = os.WriteFile(filepath.Join("/tmp", fragmentPath), blob, 0644)
_ = os.WriteFile(filepath.Join("/tmp", fragmentPath), blob, 0o644)
unpacked, err := cosesign1.UnpackAndValidateCOSE1CertChain(raw)
if err != nil {
@ -277,8 +278,8 @@ func (h *Host) AddContainer(id string, c *Container) error {
func setupSandboxMountsPath(id string) (err error) {
mountPath := spec.SandboxMountsDir(id)
if err := os.MkdirAll(mountPath, 0755); err != nil {
return errors.Wrapf(err, "failed to create sandboxMounts dir in sandbox %v", id)
if err := os.MkdirAll(mountPath, 0o755); err != nil {
return fmt.Errorf("failed to create sandboxMounts dir in sandbox %v: %w", id, err)
}
defer func() {
if err != nil {
@ -291,8 +292,8 @@ func setupSandboxMountsPath(id string) (err error) {
func setupSandboxHugePageMountsPath(id string) error {
mountPath := spec.HugePagesMountsDir(id)
if err := os.MkdirAll(mountPath, 0755); err != nil {
return errors.Wrapf(err, "failed to create hugepage Mounts dir in sandbox %v", id)
if err := os.MkdirAll(mountPath, 0o755); err != nil {
return fmt.Errorf("failed to create hugepage Mounts dir in sandbox %v: %w", id, err)
}
return storage.MountRShared(mountPath)
@ -361,7 +362,7 @@ func (h *Host) CreateContainer(ctx context.Context, id string, settings *prot.VM
sid, ok := settings.OCISpecification.Annotations[annotations.KubernetesSandboxID]
sandboxID = sid
if !ok || sid == "" {
return nil, errors.Errorf("unsupported 'io.kubernetes.cri.sandbox-id': '%s'", sid)
return nil, fmt.Errorf("unsupported %q: %q", annotations.KubernetesSandboxID, sid)
}
if err := setupWorkloadContainerSpec(ctx, sid, id, settings.OCISpecification, settings.OCIBundlePath); err != nil {
return nil, err
@ -385,7 +386,7 @@ func (h *Host) CreateContainer(ctx context.Context, id string, settings *prot.VM
return nil, err
}
default:
return nil, errors.Errorf("unsupported 'io.kubernetes.cri.container-type': '%s'", criType)
return nil, fmt.Errorf("unsupported %q: %q", annotations.KubernetesContainerType, criType)
}
} else {
// Capture namespaceID if any because setupStandaloneContainerSpec clears the Windows section.
@ -431,7 +432,7 @@ func (h *Host) CreateContainer(ctx context.Context, id string, settings *prot.VM
seccomp,
)
if err != nil {
return nil, errors.Wrapf(err, "container creation denied due to policy")
return nil, fmt.Errorf("container creation denied due to policy: %w", err)
}
if !allowStdio {
@ -465,23 +466,23 @@ func (h *Host) CreateContainer(ctx context.Context, id string, settings *prot.VM
return nil, fmt.Errorf("failed to create security context directory: %w", err)
}
// Make sure that files inside directory are readable
if err := os.Chmod(securityContextDir, 0755); err != nil {
if err := os.Chmod(securityContextDir, 0o755); err != nil {
return nil, fmt.Errorf("failed to chmod security context directory: %w", err)
}
if len(encodedPolicy) > 0 {
if err := writeFileInDir(securityContextDir, securitypolicy.PolicyFilename, []byte(encodedPolicy), 0744); err != nil {
if err := writeFileInDir(securityContextDir, securitypolicy.PolicyFilename, []byte(encodedPolicy), 0o744); err != nil {
return nil, fmt.Errorf("failed to write security policy: %w", err)
}
}
if len(h.uvmReferenceInfo) > 0 {
if err := writeFileInDir(securityContextDir, securitypolicy.ReferenceInfoFilename, []byte(h.uvmReferenceInfo), 0744); err != nil {
if err := writeFileInDir(securityContextDir, securitypolicy.ReferenceInfoFilename, []byte(h.uvmReferenceInfo), 0o744); err != nil {
return nil, fmt.Errorf("failed to write UVM reference info: %w", err)
}
}
if len(hostAMDCert) > 0 {
if err := writeFileInDir(securityContextDir, securitypolicy.HostAMDCertFilename, []byte(hostAMDCert), 0744); err != nil {
if err := writeFileInDir(securityContextDir, securitypolicy.HostAMDCertFilename, []byte(hostAMDCert), 0o744); err != nil {
return nil, fmt.Errorf("failed to write host AMD certificate: %w", err)
}
}
@ -493,30 +494,30 @@ func (h *Host) CreateContainer(ctx context.Context, id string, settings *prot.VM
}
// Create the BundlePath
if err := os.MkdirAll(settings.OCIBundlePath, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to create OCIBundlePath: '%s'", settings.OCIBundlePath)
if err := os.MkdirAll(settings.OCIBundlePath, 0o700); err != nil {
return nil, fmt.Errorf("failed to create OCIBundlePath: %w", err)
}
configFile := path.Join(settings.OCIBundlePath, "config.json")
f, err := os.Create(configFile)
if err != nil {
return nil, errors.Wrapf(err, "failed to create config.json at: '%s'", configFile)
return nil, fmt.Errorf("failed to create config.json: %w", err)
}
defer f.Close()
writer := bufio.NewWriter(f)
if err := json.NewEncoder(writer).Encode(settings.OCISpecification); err != nil {
return nil, errors.Wrapf(err, "failed to write OCISpecification to config.json at: '%s'", configFile)
return nil, fmt.Errorf("failed to write OCISpecification to config.json at %q: %w", configFile, err)
}
if err := writer.Flush(); err != nil {
return nil, errors.Wrapf(err, "failed to flush writer for config.json at: '%s'", configFile)
return nil, fmt.Errorf("failed to flush writer for config.json at %q: %w", configFile, err)
}
con, err := h.rtime.CreateContainer(id, settings.OCIBundlePath, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to create container")
return nil, fmt.Errorf("failed to create container: %w", err)
}
init, err := con.GetInitProcess()
if err != nil {
return nil, errors.Wrapf(err, "failed to get container init process")
return nil, fmt.Errorf("failed to get container init process: %w", err)
}
c.container = con
@ -619,7 +620,7 @@ func (h *Host) modifyHostSettings(ctx context.Context, containerID string, req *
}
return h.InjectFragment(ctx, r)
default:
return errors.Errorf("the ResourceType %q is not supported for UVM", req.ResourceType)
return fmt.Errorf("the ResourceType %q is not supported for UVM", req.ResourceType)
}
}
@ -633,7 +634,7 @@ func (h *Host) modifyContainerSettings(ctx context.Context, containerID string,
case guestresource.ResourceTypeContainerConstraints:
return c.modifyContainerConstraints(ctx, req.RequestType, req.Settings.(*guestresource.LCOWContainerConstraints))
default:
return errors.Errorf("the ResourceType \"%s\" is not supported for containers", req.ResourceType)
return fmt.Errorf("the ResourceType %q is not supported for containers", req.ResourceType)
}
}
@ -716,7 +717,7 @@ func (h *Host) ExecProcess(ctx context.Context, containerID string, params prot.
params.WorkingDirectory,
)
if err != nil {
return pid, errors.Wrapf(err, "exec is denied due to policy")
return pid, fmt.Errorf("exec is denied due to policy: %w", err)
}
// It makes no sense to allow access if stdio access is denied and the
@ -729,7 +730,7 @@ func (h *Host) ExecProcess(ctx context.Context, containerID string, params prot.
params.Environment = processOCIEnvToParam(envToKeep)
}
var tport = h.vsock
tport := h.vsock
if !allowStdioAccess {
tport = h.devNullTransport
}
@ -769,7 +770,7 @@ func (h *Host) ExecProcess(ctx context.Context, containerID string, params prot.
params.OCIProcess.Capabilities,
)
if err != nil {
return pid, errors.Wrapf(err, "exec in container denied due to policy")
return pid, fmt.Errorf("exec in container denied due to policy: %w", err)
}
// It makes no sense to allow access if stdio access is denied and the
@ -807,7 +808,7 @@ func (h *Host) GetExternalProcess(pid int) (Process, error) {
func (h *Host) GetProperties(ctx context.Context, containerID string, query prot.PropertyQuery) (*prot.PropertiesV2, error) {
err := h.securityPolicyEnforcer.EnforceGetPropertiesPolicy(ctx)
if err != nil {
return nil, errors.Wrapf(err, "get properties denied due to policy")
return nil, fmt.Errorf("get properties denied due to policy: %w", err)
}
c, err := h.GetCreatedContainer(containerID)
@ -825,7 +826,7 @@ func (h *Host) GetProperties(ctx context.Context, containerID string, query prot
properties.ProcessList = make([]prot.ProcessDetails, len(pids))
for i, pid := range pids {
if outOfUint32Bounds(pid) {
return nil, errors.Errorf("PID (%d) exceeds uint32 bounds", pid)
return nil, fmt.Errorf("PID (%d) exceeds uint32 bounds", pid)
}
properties.ProcessList[i].ProcessID = uint32(pid)
}
@ -844,7 +845,7 @@ func (h *Host) GetProperties(ctx context.Context, containerID string, query prot
func (h *Host) GetStacks(ctx context.Context) (string, error) {
err := h.securityPolicyEnforcer.EnforceDumpStacksPolicy(ctx)
if err != nil {
return "", errors.Wrapf(err, "dump stacks denied due to policy")
return "", fmt.Errorf("dump stacks denied due to policy: %w", err)
}
return debug.DumpStacks(), nil
@ -889,7 +890,7 @@ func (h *Host) runExternalProcess(
)
master, consolePath, err = stdio.NewConsole()
if err != nil {
return -1, errors.Wrap(err, "failed to create console for external process")
return -1, fmt.Errorf("failed to create console for external process: %w", err)
}
defer func() {
if err != nil {
@ -898,9 +899,9 @@ func (h *Host) runExternalProcess(
}()
var console *os.File
console, err = os.OpenFile(consolePath, os.O_RDWR|syscall.O_NOCTTY, 0777)
console, err = os.OpenFile(consolePath, os.O_RDWR|syscall.O_NOCTTY, 0o777)
if err != nil {
return -1, errors.Wrap(err, "failed to open console file for external process")
return -1, fmt.Errorf("failed to open console file for external process: %w", err)
}
defer console.Close()
@ -919,7 +920,7 @@ func (h *Host) runExternalProcess(
var fileSet *stdio.FileSet
fileSet, err = stdioSet.Files()
if err != nil {
return -1, errors.Wrap(err, "failed to set cmd stdio")
return -1, fmt.Errorf("failed to set cmd stdio: %w", err)
}
defer fileSet.Close()
defer stdioSet.Close()
@ -945,7 +946,7 @@ func (h *Host) runExternalProcess(
}
func newInvalidRequestTypeError(rt guestrequest.RequestType) error {
return errors.Errorf("the RequestType %q is not supported", rt)
return fmt.Errorf("the RequestType %q is not supported", rt)
}
func modifySCSIDevice(
@ -1000,7 +1001,7 @@ func modifyMappedVirtualDisk(
}
err = securityPolicy.EnforceDeviceMountPolicy(ctx, mvd.MountPath, deviceHash)
if err != nil {
return errors.Wrapf(err, "mounting scsi device controller %d lun %d onto %s denied by policy", mvd.Controller, mvd.Lun, mvd.MountPath)
return fmt.Errorf("mounting scsi device controller %d lun %d onto %s denied by policy: %w", mvd.Controller, mvd.Lun, mvd.MountPath, err)
}
}
config := &scsi.Config{
@ -1050,14 +1051,14 @@ func modifyMappedDirectory(
case guestrequest.RequestTypeAdd:
err = securityPolicy.EnforcePlan9MountPolicy(ctx, md.MountPath)
if err != nil {
return errors.Wrapf(err, "mounting plan9 device at %s denied by policy", md.MountPath)
return fmt.Errorf("mounting plan9 device at %s denied by policy: %w", md.MountPath, err)
}
return plan9.Mount(ctx, vsock, md.MountPath, md.ShareName, uint32(md.Port), md.ReadOnly)
case guestrequest.RequestTypeRemove:
err = securityPolicy.EnforcePlan9UnmountPolicy(ctx, md.MountPath)
if err != nil {
return errors.Wrapf(err, "unmounting plan9 device at %s denied by policy", md.MountPath)
return fmt.Errorf("unmounting plan9 device at %s denied by policy: %w", md.MountPath, err)
}
return storage.UnmountPath(ctx, md.MountPath, true)
@ -1087,13 +1088,13 @@ func modifyMappedVPMemDevice(ctx context.Context,
case guestrequest.RequestTypeAdd:
err = securityPolicy.EnforceDeviceMountPolicy(ctx, vpd.MountPath, deviceHash)
if err != nil {
return errors.Wrapf(err, "mounting pmem device %d onto %s denied by policy", vpd.DeviceNumber, vpd.MountPath)
return fmt.Errorf("mounting pmem device %d onto %s denied by policy: %w", vpd.DeviceNumber, vpd.MountPath, err)
}
return pmem.Mount(ctx, vpd.DeviceNumber, vpd.MountPath, vpd.MappingInfo, verityInfo)
case guestrequest.RequestTypeRemove:
if err := securityPolicy.EnforceDeviceUnmountPolicy(ctx, vpd.MountPath); err != nil {
return errors.Wrapf(err, "unmounting pmem device from %s denied by policy", vpd.MountPath)
return fmt.Errorf("unmounting pmem device from %s denied by policy: %w", vpd.MountPath, err)
}
return pmem.Unmount(ctx, vpd.DeviceNumber, vpd.MountPath, vpd.MappingInfo, verityInfo)
@ -1147,7 +1148,7 @@ func modifyCombinedLayers(
return overlay.MountLayer(ctx, layerPaths, upperdirPath, workdirPath, cl.ContainerRootPath, readonly)
case guestrequest.RequestTypeRemove:
if err := securityPolicy.EnforceOverlayUnmountPolicy(ctx, cl.ContainerRootPath); err != nil {
return errors.Wrap(err, "overlay removal denied by policy")
return fmt.Errorf("overlay removal denied by policy: %w", err)
}
return storage.UnmountPath(ctx, cl.ContainerRootPath, true)
@ -1183,7 +1184,7 @@ func modifyNetwork(ctx context.Context, rt guestrequest.RequestType, na *guestre
func processParamCommandLineToOCIArgs(commandLine string) ([]string, error) {
args, err := shellwords.Parse(commandLine)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse command line string \"%s\"", commandLine)
return nil, fmt.Errorf("failed to parse command line string %q: %w", commandLine, err)
}
return args, nil
}

Просмотреть файл

@ -12,7 +12,7 @@ import (
"github.com/opencontainers/runc/libcontainer/devices"
oci "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"golang.org/x/sys/unix"
@ -40,7 +40,7 @@ func updateSandboxMounts(sbid string, spec *oci.Spec) error {
// filepath.Join cleans the resulting path before returning, so it would resolve the relative path if one was given.
// Hence, we need to ensure that the resolved path is still under the correct directory
if !strings.HasPrefix(sandboxSource, specInternal.SandboxMountsDir(sbid)) {
return errors.Errorf("mount path %v for mount %v is not within sandbox's mounts dir", sandboxSource, m.Source)
return fmt.Errorf("mount path %v for mount %v is not within sandbox's mounts dir", sandboxSource, m.Source)
}
spec.Mounts[i].Source = sandboxSource
@ -67,7 +67,7 @@ func updateHugePageMounts(sbid string, spec *oci.Spec) error {
// filepath.Join cleans the resulting path before returning so it would resolve the relative path if one was given.
// Hence, we need to ensure that the resolved path is still under the correct directory
if !strings.HasPrefix(hugePageMountSource, mountsDir) {
return errors.Errorf("mount path %v for mount %v is not within hugepages's mounts dir", hugePageMountSource, m.Source)
return fmt.Errorf("mount path %v for mount %v is not within hugepages's mounts dir", hugePageMountSource, m.Source)
}
spec.Mounts[i].Source = hugePageMountSource
@ -78,7 +78,7 @@ func updateHugePageMounts(sbid string, spec *oci.Spec) error {
return err
}
if err := unix.Mount("none", hugePageMountSource, "hugetlbfs", 0, "pagesize="+pageSize); err != nil {
return errors.Errorf("mount operation failed for %v failed with error %v", hugePageMountSource, err)
		return fmt.Errorf("mount operation failed for %v: %w", hugePageMountSource, err)
}
}
}
@ -144,16 +144,16 @@ func setupWorkloadContainerSpec(ctx context.Context, sbid, id string, spec *oci.
// Verify no hostname
if spec.Hostname != "" {
return errors.Errorf("workload container must not change hostname: %s", spec.Hostname)
return fmt.Errorf("workload container must not change hostname: %s", spec.Hostname)
}
// update any sandbox mounts with the sandboxMounts directory path and create files
if err = updateSandboxMounts(sbid, spec); err != nil {
return errors.Wrapf(err, "failed to update sandbox mounts for container %v in sandbox %v", id, sbid)
return fmt.Errorf("failed to update sandbox mounts for container %v in sandbox %v: %w", id, sbid, err)
}
if err = updateHugePageMounts(sbid, spec); err != nil {
return errors.Wrapf(err, "failed to update hugepages mounts for container %v in sandbox %v", id, sbid)
return fmt.Errorf("failed to update hugepages mounts for container %v in sandbox %v: %w", id, sbid, err)
}
if err = updateBlockDeviceMounts(spec); err != nil {
@ -201,7 +201,7 @@ func setupWorkloadContainerSpec(ctx context.Context, sbid, id string, spec *oci.
}
// add other assigned devices to the spec
if err := addAssignedDevice(ctx, spec); err != nil {
return errors.Wrap(err, "failed to add assigned device(s) to the container spec")
return fmt.Errorf("failed to add assigned device(s) to the container spec: %w", err)
}
}

Просмотреть файл

@ -5,6 +5,8 @@ package runc
import (
"encoding/json"
"errors"
"fmt"
"net"
"os"
"path/filepath"
@ -13,7 +15,7 @@ import (
"syscall"
oci "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
@ -59,7 +61,7 @@ func (c *container) Start() error {
if err != nil {
runcErr := getRuncLogError(logPath)
c.r.cleanupContainer(c.id) //nolint:errcheck
return errors.Wrapf(runcErr, "runc start failed with %v: %s", err, string(out))
return fmt.Errorf("runc start failed with %v: %s: %w", err, string(out), runcErr)
}
return nil
}
@ -86,7 +88,7 @@ func (c *container) Kill(signal syscall.Signal) error {
out, err := cmd.CombinedOutput()
if err != nil {
runcErr := parseRuncError(string(out))
return errors.Wrapf(runcErr, "unknown runc error after kill %v: %s", err, string(out))
return fmt.Errorf("unknown runc error after kill %v: %s: %w", err, string(out), runcErr)
}
return nil
}
@ -99,7 +101,7 @@ func (c *container) Delete() error {
out, err := cmd.CombinedOutput()
if err != nil {
runcErr := parseRuncError(string(out))
return errors.Wrapf(runcErr, "runc delete failed with %v: %s", err, string(out))
return fmt.Errorf("runc delete failed with %v: %s: %w", err, string(out), runcErr)
}
return c.r.cleanupContainer(c.id)
}
@ -110,7 +112,7 @@ func (c *container) Pause() error {
out, err := cmd.CombinedOutput()
if err != nil {
runcErr := parseRuncError(string(out))
return errors.Wrapf(runcErr, "runc pause failed with %v: %s", err, string(out))
return fmt.Errorf("runc pause failed with %v: %s: %w", err, string(out), runcErr)
}
return nil
}
@ -123,7 +125,7 @@ func (c *container) Resume() error {
out, err := cmd.CombinedOutput()
if err != nil {
runcErr := getRuncLogError(logPath)
return errors.Wrapf(runcErr, "runc resume failed with %v: %s", err, string(out))
return fmt.Errorf("runc resume failed with %v: %s: %w", err, string(out), runcErr)
}
return nil
}
@ -134,11 +136,11 @@ func (c *container) GetState() (*runtime.ContainerState, error) {
out, err := cmd.CombinedOutput()
if err != nil {
runcErr := parseRuncError(string(out))
return nil, errors.Wrapf(runcErr, "runc state failed with %v: %s", err, string(out))
return nil, fmt.Errorf("runc state failed with %v: %s: %w", err, string(out), runcErr)
}
var state runtime.ContainerState
if err := json.Unmarshal(out, &state); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal the state for container %s", c.id)
return nil, fmt.Errorf("failed to unmarshal the state for container %s: %w", c.id, err)
}
return &state, nil
}
@ -156,7 +158,7 @@ func (c *container) Exists() (bool, error) {
if errors.Is(runcErr, runtime.ErrContainerDoesNotExist) {
return false, nil
}
return false, errors.Wrapf(runcErr, "runc state failed with %v: %s", err, string(out))
return false, fmt.Errorf("runc state failed with %v: %s: %w", err, string(out), runcErr)
}
return true, nil
}
@ -189,13 +191,13 @@ func (c *container) GetRunningProcesses() ([]runtime.ContainerProcessState, erro
// that the process was created by the Runtime.
processDirs, err := os.ReadDir(filepath.Join(containerFilesDir, c.id))
if err != nil {
return nil, errors.Wrapf(err, "failed to read the contents of container directory %s", filepath.Join(containerFilesDir, c.id))
		return nil, fmt.Errorf("failed to read the contents of container directory: %w", err)
}
for _, processDir := range processDirs {
if processDir.Name() != initPidFilename {
pid, err := strconv.Atoi(processDir.Name())
if err != nil {
return nil, errors.Wrapf(err, "failed to parse string \"%s\" as pid", processDir.Name())
return nil, fmt.Errorf("failed to parse pid: %w", err)
}
if _, ok := pidMap[pid]; ok {
pidMap[pid].CreatedByRuntime = true
@ -236,7 +238,7 @@ func (c *container) GetAllProcesses() ([]runtime.ContainerProcessState, error) {
processDirs, err := os.ReadDir(filepath.Join(containerFilesDir, c.id))
if err != nil {
return nil, errors.Wrapf(err, "failed to read the contents of container directory %s", filepath.Join(containerFilesDir, c.id))
return nil, fmt.Errorf("failed to read the contents of container directory: %w", err)
}
// Loop over every process state directory. Since these processes have
// process state directories, CreatedByRuntime will be true for all of them.
@ -244,7 +246,7 @@ func (c *container) GetAllProcesses() ([]runtime.ContainerProcessState, error) {
if processDir.Name() != initPidFilename {
pid, err := strconv.Atoi(processDir.Name())
if err != nil {
return nil, errors.Wrapf(err, "failed to parse string \"%s\" into pid", processDir.Name())
return nil, fmt.Errorf("failed to parse pid: %w", err)
}
if c.r.processExists(pid) {
// If the process exists in /proc and is in the pidMap, it must
@ -317,11 +319,11 @@ func (c *container) runExecCommand(processDef *oci.Process, stdioSet *stdio.Conn
f, err := os.Create(filepath.Join(tempProcessDir, "process.json"))
if err != nil {
return nil, errors.Wrapf(err, "failed to create process.json file at %s", filepath.Join(tempProcessDir, "process.json"))
return nil, fmt.Errorf("failed to create process.json file at %s: %w", filepath.Join(tempProcessDir, "process.json"), err)
}
defer f.Close()
if err := json.NewEncoder(f).Encode(processDef); err != nil {
return nil, errors.Wrap(err, "failed to encode JSON into process.json file")
return nil, fmt.Errorf("failed to encode JSON into process.json file: %w", err)
}
args := []string{"exec"}
@ -342,7 +344,7 @@ func (c *container) startProcess(
args := initialArgs
if err := setSubreaper(1); err != nil {
return nil, errors.Wrapf(err, "failed to set process as subreaper for process in container %s", c.id)
return nil, fmt.Errorf("failed to set process as subreaper for process in container %s: %w", c.id, err)
}
if err := c.r.makeLogDir(c.id); err != nil {
return nil, err
@ -356,7 +358,7 @@ func (c *container) startProcess(
var consoleSockPath string
sockListener, consoleSockPath, err = c.r.createConsoleSocket(tempProcessDir)
if err != nil {
return nil, errors.Wrapf(err, "failed to create console socket for container %s", c.id)
return nil, fmt.Errorf("failed to create console socket for container %s: %w", c.id, err)
}
defer sockListener.Close()
args = append(args, "--console-socket", consoleSockPath)
@ -369,11 +371,11 @@ func (c *container) startProcess(
if !hasTerminal {
pipeRelay, err = stdio.NewPipeRelay(stdioSet)
if err != nil {
return nil, errors.Wrapf(err, "failed to create a pipe relay connection set for container %s", c.id)
return nil, fmt.Errorf("failed to create a pipe relay connection set for container %s: %w", c.id, err)
}
fileSet, err := pipeRelay.Files()
if err != nil {
return nil, errors.Wrapf(err, "failed to get files for connection set for container %s", c.id)
return nil, fmt.Errorf("failed to get files for connection set for container %s: %w", c.id, err)
}
// Closing the FileSet here is fine as that end of the pipes will have
// already been copied into the child process.
@ -391,7 +393,7 @@ func (c *container) startProcess(
if err := cmd.Run(); err != nil {
runcErr := getRuncLogError(logPath)
return nil, errors.Wrapf(runcErr, "failed to run runc create/exec call for container %s with %v", c.id, err)
return nil, fmt.Errorf("failed to run runc create/exec call for container %s with %v: %w", c.id, err, runcErr)
}
var ttyRelay *stdio.TtyRelay
@ -400,7 +402,7 @@ func (c *container) startProcess(
master, err = c.r.getMasterFromSocket(sockListener)
if err != nil {
_ = cmd.Process.Kill()
return nil, errors.Wrapf(err, "failed to get pty master for process in container %s", c.id)
return nil, fmt.Errorf("failed to get pty master for process in container %s: %w", c.id, err)
}
// Keep master open for the relay unless there is an error.
defer func() {
@ -439,7 +441,7 @@ func (c *container) Update(resources interface{}) error {
out, err := cmd.CombinedOutput()
if err != nil {
runcErr := parseRuncError(string(out))
return errors.Wrapf(runcErr, "runc update request %s failed with %v: %s", string(jsonResources), err, string(out))
return fmt.Errorf("runc update request %s failed with %v: %s: %w", string(jsonResources), err, string(out), runcErr)
}
return nil
}

Просмотреть файл

@ -4,11 +4,12 @@
package runc
import (
"errors"
"fmt"
"net"
"os"
"path/filepath"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@ -19,11 +20,11 @@ func (*runcRuntime) createConsoleSocket(processDir string) (listener *net.UnixLi
socketPath = filepath.Join(processDir, "master.sock")
addr, err := net.ResolveUnixAddr("unix", socketPath)
if err != nil {
return nil, "", errors.Wrapf(err, "failed to resolve unix socket at address %s", socketPath)
return nil, "", fmt.Errorf("failed to resolve unix socket at address %s: %w", socketPath, err)
}
listener, err = net.ListenUnix("unix", addr)
if err != nil {
return nil, "", errors.Wrapf(err, "failed to listen on unix socket at address %s", socketPath)
return nil, "", fmt.Errorf("failed to listen on unix socket at address %s: %w", socketPath, err)
}
return listener, socketPath, nil
}
@ -35,7 +36,7 @@ func (*runcRuntime) getMasterFromSocket(listener *net.UnixListener) (master *os.
// Accept the listener's connection.
conn, err := listener.Accept()
if err != nil {
return nil, errors.Wrap(err, "failed to get terminal master file descriptor from socket")
return nil, fmt.Errorf("failed to get terminal master file descriptor from socket: %w", err)
}
defer conn.Close()
unixConn, ok := conn.(*net.UnixConn)
@ -53,10 +54,10 @@ func (*runcRuntime) getMasterFromSocket(listener *net.UnixListener) (master *os.
// sent.
n, oobn, _, _, err := unixConn.ReadMsgUnix(name, oob)
if err != nil {
return nil, errors.Wrap(err, "failed to read message from unix socket")
return nil, fmt.Errorf("failed to read message from unix socket: %w", err)
}
if n >= maxNameLen || oobn != oobSpace {
return nil, errors.Errorf("read an invalid number of bytes (n=%d oobn=%d)", n, oobn)
return nil, fmt.Errorf("read an invalid number of bytes (n=%d oobn=%d)", n, oobn)
}
// Truncate the data returned from the message.
@ -66,26 +67,26 @@ func (*runcRuntime) getMasterFromSocket(listener *net.UnixListener) (master *os.
// Parse the out-of-band data in the message.
messages, err := unix.ParseSocketControlMessage(oob)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse socket control message for oob %v", oob)
return nil, fmt.Errorf("failed to parse socket control message for oob %v: %w", oob, err)
}
if len(messages) == 0 {
return nil, errors.New("did not receive any socket control messages")
}
if len(messages) > 1 {
return nil, errors.Errorf("received more than one socket control message: received %d", len(messages))
return nil, fmt.Errorf("received more than one socket control message: received %d", len(messages))
}
message := messages[0]
// Parse the file descriptor out of the out-of-band data in the message.
fds, err := unix.ParseUnixRights(&message)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse file descriptors out of message %v", message)
return nil, fmt.Errorf("failed to parse file descriptors out of message %v: %w", message, err)
}
if len(fds) == 0 {
return nil, errors.New("did not receive any file descriptors")
}
if len(fds) > 1 {
return nil, errors.Errorf("received more than one file descriptor: received %d", len(fds))
return nil, fmt.Errorf("received more than one file descriptor: received %d", len(fds))
}
fd := uintptr(fds[0])
@ -101,7 +102,7 @@ func (*runcRuntime) pathExists(pathToCheck string) (bool, error) {
if os.IsNotExist(err) {
return false, nil
}
return false, errors.Wrapf(err, "failed call to Stat for path %s", pathToCheck)
return false, fmt.Errorf("failed call to Stat for path %s: %w", pathToCheck, err)
}
return true, nil
}

Просмотреть файл

@ -5,6 +5,7 @@ package runc
import (
"encoding/json"
"fmt"
"os"
"path"
"path/filepath"
@ -13,7 +14,7 @@ import (
"syscall"
oci "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/Microsoft/hcsshim/internal/guest/commonutils"
@ -57,7 +58,7 @@ func (r *runcRuntime) initialize() error {
return err
}
if err := os.MkdirAll(p, 0700); err != nil {
return errors.Wrapf(err, "failed making runC container files directory %s", p)
return fmt.Errorf("failed making runC container files directory %s: %w", p, err)
}
}
}
@ -84,11 +85,11 @@ func (*runcRuntime) ListContainerStates() ([]runtime.ContainerState, error) {
out, err := cmd.CombinedOutput()
if err != nil {
runcErr := parseRuncError(string(out))
return nil, errors.Wrapf(runcErr, "runc list failed with %v: %s", err, string(out))
return nil, fmt.Errorf("runc list failed with %v: %s: %w", err, string(out), runcErr)
}
var states []runtime.ContainerState
if err := json.Unmarshal(out, &states); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal the states for the container list")
return nil, fmt.Errorf("failed to unmarshal the states for the container list: %w", err)
}
return states, nil
}
@ -100,11 +101,11 @@ func (*runcRuntime) getRunningPids(id string) ([]int, error) {
out, err := cmd.CombinedOutput()
if err != nil {
runcErr := parseRuncError(string(out))
return nil, errors.Wrapf(runcErr, "runc ps failed with %v: %s", err, string(out))
return nil, fmt.Errorf("runc ps failed with %v: %s: %w", err, string(out), runcErr)
}
var pids []int
if err := json.Unmarshal(out, &pids); err != nil {
return nil, errors.Wrapf(err, "failed to unmarshal pids for container %s", id)
return nil, fmt.Errorf("failed to unmarshal pids for container %s: %w", id, err)
}
return pids, nil
}
@ -116,7 +117,7 @@ func (*runcRuntime) getProcessCommand(pid int) ([]string, error) {
// with a null character after every argument. e.g. "ping google.com "
data, err := os.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "cmdline"))
if err != nil {
return nil, errors.Wrapf(err, "failed to read cmdline file for process %d", pid)
return nil, fmt.Errorf("failed to read cmdline file for process %d: %w", pid, err)
}
// Get rid of the \0 character at end.
cmdString := strings.TrimSuffix(string(data), "\x00")
@ -139,11 +140,11 @@ func (*runcRuntime) pidMapToProcessStates(pidMap map[int]*runtime.ContainerProce
func (r *runcRuntime) waitOnProcess(pid int) (int, error) {
process, err := os.FindProcess(pid)
if err != nil {
return -1, errors.Wrapf(err, "failed to find process %d", pid)
return -1, fmt.Errorf("failed to find process %d: %w", pid, err)
}
state, err := process.Wait()
if err != nil {
return -1, errors.Wrapf(err, "failed waiting on process %d", pid)
return -1, fmt.Errorf("failed waiting on process %d: %w", pid, err)
}
status := state.Sys().(syscall.WaitStatus)
@ -212,12 +213,12 @@ func ociSpecFromBundle(bundlePath string) (*oci.Spec, error) {
configPath := filepath.Join(bundlePath, "config.json")
configFile, err := os.Open(configPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to open bundle config at %s", configPath)
return nil, fmt.Errorf("failed to open bundle config at %s: %w", configPath, err)
}
defer configFile.Close()
var spec *oci.Spec
if err := commonutils.DecodeJSONWithHresult(configFile, &spec); err != nil {
return nil, errors.Wrap(err, "failed to parse OCI spec")
return nil, fmt.Errorf("failed to parse OCI spec: %w", err)
}
return spec, nil
}

Просмотреть файл

@ -5,6 +5,8 @@ package runc
import (
"encoding/json"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
@ -12,7 +14,6 @@ import (
"strings"
"syscall"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/Microsoft/hcsshim/internal/guest/runtime"
@ -22,11 +23,11 @@ import (
func (r *runcRuntime) readPidFile(pidFile string) (pid int, err error) {
data, err := os.ReadFile(pidFile)
if err != nil {
return -1, errors.Wrap(err, "failed reading from pid file")
return -1, fmt.Errorf("failed reading from pid file: %w", err)
}
pid, err = strconv.Atoi(string(data))
if err != nil {
return -1, errors.Wrapf(err, "failed converting pid text %q to integer form", data)
return -1, fmt.Errorf("failed converting pid text %q to integer form: %w", data, err)
}
return pid, nil
}
@ -35,7 +36,7 @@ func (r *runcRuntime) readPidFile(pidFile string) (pid int, err error) {
func (r *runcRuntime) cleanupContainer(id string) error {
containerDir := r.getContainerDir(id)
if err := os.RemoveAll(containerDir); err != nil {
return errors.Wrapf(err, "failed removing the container directory for container %s", id)
return fmt.Errorf("failed removing the container directory for container %s: %w", id, err)
}
return nil
}
@ -44,7 +45,7 @@ func (r *runcRuntime) cleanupContainer(id string) error {
func (r *runcRuntime) cleanupProcess(id string, pid int) error {
processDir := r.getProcessDir(id, pid)
if err := os.RemoveAll(processDir); err != nil {
return errors.Wrapf(err, "failed removing the process directory for process %d in container %s", pid, id)
return fmt.Errorf("failed removing the process directory for process %d in container %s: %w", pid, id, err)
}
return nil
}
@ -65,7 +66,7 @@ func (*runcRuntime) getContainerDir(id string) string {
func (r *runcRuntime) makeContainerDir(id string) error {
dir := r.getContainerDir(id)
if err := os.MkdirAll(dir, os.ModeDir); err != nil {
return errors.Wrapf(err, "failed making container directory for container %s", id)
return fmt.Errorf("failed making container directory for container %s: %w", id, err)
}
return nil
}
@ -79,7 +80,7 @@ func (r *runcRuntime) getLogDir(id string) string {
func (r *runcRuntime) makeLogDir(id string) error {
dir := r.getLogDir(id)
if err := os.MkdirAll(dir, os.ModeDir); err != nil {
return errors.Wrapf(err, "failed making runc log directory for container %s", id)
return fmt.Errorf("failed making runc log directory for container %s: %w", id, err)
}
return nil
}
@ -118,7 +119,7 @@ type standardLogEntry struct {
func (l *standardLogEntry) asError() (err error) {
err = parseRuncError(l.Message)
if l.Err != nil {
err = errors.Wrapf(err, l.Err.Error())
err = fmt.Errorf(l.Err.Error()+": %w", err)
}
return
}

Просмотреть файл

@ -4,10 +4,11 @@
package stdio
import (
"fmt"
"os"
"github.com/Microsoft/hcsshim/internal/guest/transport"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -75,7 +76,7 @@ func Connect(tport transport.Transport, settings ConnectionSettings) (_ *Connect
if settings.StdIn != nil {
c, err := tport.Dial(*settings.StdIn)
if err != nil {
return nil, errors.Wrap(err, "failed creating stdin Connection")
return nil, fmt.Errorf("failed creating stdin Connection: %w", err)
}
connSet.In = &logConnection{
con: c,
@ -85,7 +86,7 @@ func Connect(tport transport.Transport, settings ConnectionSettings) (_ *Connect
if settings.StdOut != nil {
c, err := tport.Dial(*settings.StdOut)
if err != nil {
return nil, errors.Wrap(err, "failed creating stdout Connection")
return nil, fmt.Errorf("failed creating stdout Connection: %w", err)
}
connSet.Out = &logConnection{
con: c,
@ -95,7 +96,7 @@ func Connect(tport transport.Transport, settings ConnectionSettings) (_ *Connect
if settings.StdErr != nil {
c, err := tport.Dial(*settings.StdErr)
if err != nil {
return nil, errors.Wrap(err, "failed creating stderr Connection")
return nil, fmt.Errorf("failed creating stderr Connection: %w", err)
}
connSet.Err = &logConnection{
con: c,

Просмотреть файл

@ -4,13 +4,15 @@
package stdio
import (
"errors"
"fmt"
"io"
"os"
"strings"
"sync"
"github.com/Microsoft/hcsshim/internal/guest/transport"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -25,19 +27,19 @@ func (s *ConnectionSet) Close() error {
var err error
if s.In != nil {
if cerr := s.In.Close(); cerr != nil {
err = errors.Wrap(cerr, "failed Close on stdin")
err = fmt.Errorf("failed Close on stdin: %w", cerr)
}
s.In = nil
}
if s.Out != nil {
if cerr := s.Out.Close(); cerr != nil && err == nil {
err = errors.Wrap(cerr, "failed Close on stdout")
err = fmt.Errorf("failed Close on stdout: %w", cerr)
}
s.Out = nil
}
if s.Err != nil {
if cerr := s.Err.Close(); cerr != nil && err == nil {
err = errors.Wrap(cerr, "failed Close on stderr")
err = fmt.Errorf("failed Close on stderr: %w", cerr)
}
s.Err = nil
}
@ -55,19 +57,19 @@ func (fs *FileSet) Close() error {
var err error
if fs.In != nil {
if cerr := fs.In.Close(); cerr != nil {
err = errors.Wrap(cerr, "failed Close on stdin")
err = fmt.Errorf("failed Close on stdin: %w", cerr)
}
fs.In = nil
}
if fs.Out != nil {
if cerr := fs.Out.Close(); cerr != nil && err == nil {
err = errors.Wrap(cerr, "failed Close on stdout")
err = fmt.Errorf("failed Close on stdout: %w", cerr)
}
fs.Out = nil
}
if fs.Err != nil {
if cerr := fs.Err.Close(); cerr != nil && err == nil {
err = errors.Wrap(cerr, "failed Close on stderr")
err = fmt.Errorf("failed Close on stderr: %w", cerr)
}
fs.Err = nil
}
@ -86,19 +88,19 @@ func (s *ConnectionSet) Files() (_ *FileSet, err error) {
if s.In != nil {
fs.In, err = s.In.File()
if err != nil {
return nil, errors.Wrap(err, "failed to dup stdin socket for command")
return nil, fmt.Errorf("failed to dup stdin socket for command: %w", err)
}
}
if s.Out != nil {
fs.Out, err = s.Out.File()
if err != nil {
return nil, errors.Wrap(err, "failed to dup stdout socket for command")
return nil, fmt.Errorf("failed to dup stdout socket for command: %w", err)
}
}
if s.Err != nil {
fs.Err, err = s.Err.File()
if err != nil {
return nil, errors.Wrap(err, "failed to dup stderr socket for command")
return nil, fmt.Errorf("failed to dup stderr socket for command: %w", err)
}
}
return fs, nil
@ -117,19 +119,19 @@ func NewPipeRelay(s *ConnectionSet) (_ *PipeRelay, err error) {
if s == nil || s.In != nil {
pr.pipes[0], pr.pipes[1], err = os.Pipe()
if err != nil {
return nil, errors.Wrap(err, "failed to create stdin pipe relay")
return nil, fmt.Errorf("failed to create stdin pipe relay: %w", err)
}
}
if s == nil || s.Out != nil {
pr.pipes[2], pr.pipes[3], err = os.Pipe()
if err != nil {
return nil, errors.Wrap(err, "failed to create stdout pipe relay")
return nil, fmt.Errorf("failed to create stdout pipe relay: %w", err)
}
}
if s == nil || s.Err != nil {
pr.pipes[4], pr.pipes[5], err = os.Pipe()
if err != nil {
return nil, errors.Wrap(err, "failed to create stderr pipe relay")
return nil, fmt.Errorf("failed to create stderr pipe relay: %w", err)
}
}
return pr, nil

Просмотреть файл

@ -9,7 +9,6 @@ import (
"syscall"
"unsafe"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@ -18,7 +17,7 @@ import (
func NewConsole() (*os.File, string, error) {
master, err := os.OpenFile("/dev/ptmx", syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_CLOEXEC, 0)
if err != nil {
return nil, "", errors.Wrap(err, "failed to open master pseudoterminal file")
return nil, "", fmt.Errorf("failed to open master pseudoterminal file: %w", err)
}
console, err := ptsname(master)
if err != nil {
@ -29,10 +28,10 @@ func NewConsole() (*os.File, string, error) {
}
// TODO: Do we need to keep this chmod call?
if err := os.Chmod(console, 0600); err != nil {
return nil, "", errors.Wrap(err, "failed to change permissions on the slave pseudoterminal file")
return nil, "", fmt.Errorf("failed to change permissions on the slave pseudoterminal file: %w", err)
}
if err := os.Chown(console, 0, 0); err != nil {
return nil, "", errors.Wrap(err, "failed to change ownership on the slave pseudoterminal file")
return nil, "", fmt.Errorf("failed to change ownership on the slave pseudoterminal file: %w", err)
}
return master, console, nil
}
@ -62,7 +61,7 @@ func ioctl(fd uintptr, flag, data uintptr) error {
func ptsname(f *os.File) (string, error) {
var n int32
if err := ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil {
return "", errors.Wrap(err, "ioctl TIOCGPTN failed for ptsname")
return "", fmt.Errorf("ioctl TIOCGPTN failed for ptsname: %w", err)
}
return fmt.Sprintf("/dev/pts/%d", n), nil
}
@ -72,7 +71,7 @@ func ptsname(f *os.File) (string, error) {
func unlockpt(f *os.File) error {
var u int32
if err := ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); err != nil {
return errors.Wrap(err, "ioctl TIOCSPTLCK failed for unlockpt")
return fmt.Errorf("ioctl TIOCSPTLCK failed for unlockpt: %w", err)
}
return nil
}

Просмотреть файл

@ -13,7 +13,6 @@ import (
"time"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@ -96,7 +95,8 @@ func cryptsetupFormat(ctx context.Context, source string, keyFilePath string) er
// supposed to derive a strong key from it. In our case, we already pass
// a strong key to cryptsetup, so we don't need a strong KDF. Ideally,
// it would be bypassed completely, but this isn't possible.
"--pbkdf", "pbkdf2", "--pbkdf-force-iterations", "1000"}
"--pbkdf", "pbkdf2", "--pbkdf-force-iterations", "1000",
}
return cryptsetupCommand(ctx, formatArgs)
}
@ -107,7 +107,8 @@ func cryptsetupOpen(ctx context.Context, source string, deviceName string, keyFi
// Open device with the key passed to luksFormat
"luksOpen", source, deviceName, "--key-file", keyFilePath,
// Don't use a journal to increase performance
"--integrity-no-journal", "--persistent"}
"--integrity-no-journal", "--persistent",
}
return cryptsetupCommand(ctx, openArgs)
}
@ -148,7 +149,7 @@ func EncryptDevice(ctx context.Context, source string, dmCryptName string) (path
// Create temporary directory to store the keyfile and xfs image
tempDir, err := _osMkdirTemp("", "dm-crypt")
if err != nil {
return "", errors.Wrapf(err, "failed to create temporary folder: %s", source)
return "", fmt.Errorf("failed to create temporary folder: %w", err)
}
defer func() {

Просмотреть файл

@ -5,9 +5,8 @@ package crypt
import (
"context"
"errors"
"testing"
"github.com/pkg/errors"
)
const tempDir = "/tmp/dir/"
@ -47,7 +46,7 @@ func Test_Encrypt_Generate_Key_Error(t *testing.T) {
}
_, err := EncryptDevice(context.Background(), source, "dm-crypt-target")
if errors.Unwrap(err) != expectedErr { //nolint:errorlint
if !errors.Is(err, expectedErr) {
t.Fatalf("expected err: '%v' got: '%v'", expectedErr, err)
}
}
@ -80,7 +79,7 @@ func Test_Encrypt_Cryptsetup_Format_Error(t *testing.T) {
}
_, err := EncryptDevice(context.Background(), expectedSource, "dm-crypt-target")
if errors.Unwrap(err) != expectedErr { //nolint:errorlint
if !errors.Is(err, expectedErr) {
t.Fatalf("expected err: '%v' got: '%v", expectedErr, err)
}
}
@ -120,7 +119,7 @@ func Test_Encrypt_Cryptsetup_Open_Error(t *testing.T) {
}
_, err := EncryptDevice(context.Background(), expectedSource, dmCryptName)
if errors.Unwrap(err) != expectedErr { //nolint:errorlint
if !errors.Is(err, expectedErr) {
t.Fatalf("expected err: '%v' got: '%v'", expectedErr, err)
}
}
@ -175,7 +174,7 @@ func Test_Cleanup_Dm_Crypt_Error(t *testing.T) {
}
err := CleanupCryptDevice(context.TODO(), dmCryptName)
if errors.Unwrap(err) != expectedErr { //nolint:errorlint
if !errors.Is(err, expectedErr) {
t.Fatalf("expected err: '%v' got: '%v'", expectedErr, err)
}
}

Просмотреть файл

@ -6,21 +6,22 @@ package storage
import (
"bufio"
"context"
gerrors "errors"
"errors"
"fmt"
"os"
"strings"
"syscall"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"golang.org/x/sys/unix"
"github.com/Microsoft/hcsshim/internal/oc"
)
const procMountFile = "/proc/mounts"
const numProcMountFields = 6
const (
procMountFile = "/proc/mounts"
numProcMountFields = 6
)
// Test dependencies
var (
@ -128,14 +129,14 @@ func UnmountPath(ctx context.Context, target string, removeTarget bool) (err err
if os.IsNotExist(err) {
return nil
}
return errors.Wrapf(err, "failed to determine if path '%s' exists", target)
return fmt.Errorf("failed to determine if path %q exists: %w", target, err)
}
if err := unixUnmount(target, 0); err != nil {
// If `Unmount` returns `EINVAL` it's not mounted. Just delete the
// folder.
if !gerrors.Is(err, unix.EINVAL) {
return errors.Wrapf(err, "failed to unmount path '%s'", target)
if !errors.Is(err, unix.EINVAL) {
return fmt.Errorf("failed to unmount path %q: %w", target, err)
}
}
if removeTarget {

Просмотреть файл

@ -5,10 +5,11 @@ package storage
import (
"context"
"errors"
"fmt"
"os"
"testing"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
@ -59,7 +60,7 @@ func Test_Unmount_Stat_OtherError_Error(t *testing.T) {
return nil, expectedErr
}
err := UnmountPath(context.Background(), "/dev/fake", false)
if errors.Cause(err) != expectedErr { //nolint:errorlint
if !errors.Is(err, expectedErr) {
t.Fatalf("expected err: %v, got: %v", expectedErr, err)
}
}
@ -129,7 +130,7 @@ func Test_Unmount_OtherError(t *testing.T) {
return expectedErr
}
err := UnmountPath(context.Background(), "/dev/fake", false)
if errors.Cause(err) != expectedErr { //nolint:errorlint
if !errors.Is(err, expectedErr) {
t.Fatalf("expected err: %v, got: %v", expectedErr, err)
}
}
@ -195,7 +196,7 @@ func Test_UnmountAllInPath_Unmount_Order(t *testing.T) {
timesCalled := 0
unixUnmount = func(target string, flags int) error {
if timesCalled == 0 && target != child {
return errors.Errorf("expected to unmount %v first, got %v", child, target)
return fmt.Errorf("expected to unmount %v first, got %v", child, target)
}
timesCalled += 1
return nil
@ -206,7 +207,6 @@ func Test_UnmountAllInPath_Unmount_Order(t *testing.T) {
}
err := UnmountAllInPath(context.Background(), parent, true)
if err != nil {
t.Fatalf("expected nil error, got: %v", err)
}

Просмотреть файл

@ -5,6 +5,7 @@ package overlay
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
@ -13,7 +14,7 @@ import (
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/memory"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"golang.org/x/sys/unix"
@ -108,13 +109,13 @@ func Mount(ctx context.Context, basePaths []string, upperdirPath, workdirPath, t
}
if readonly && (upperdirPath != "" || workdirPath != "") {
return errors.Errorf("upperdirPath: %q, and workdirPath: %q must be empty when readonly==true", upperdirPath, workdirPath)
return fmt.Errorf("upperdirPath: %q, and workdirPath: %q must be empty when readonly==true", upperdirPath, workdirPath)
}
options := []string{"lowerdir=" + lowerdir}
if upperdirPath != "" {
if err := osMkdirAll(upperdirPath, 0755); err != nil {
return errors.Wrap(err, "failed to create upper directory in scratch space")
return fmt.Errorf("failed to create upper directory in scratch space: %w", err)
}
defer func() {
if err != nil {
@ -125,7 +126,7 @@ func Mount(ctx context.Context, basePaths []string, upperdirPath, workdirPath, t
}
if workdirPath != "" {
if err := osMkdirAll(workdirPath, 0755); err != nil {
return errors.Wrap(err, "failed to create workdir in scratch space")
return fmt.Errorf("failed to create workdir in scratch space: %w", err)
}
defer func() {
if err != nil {
@ -135,7 +136,7 @@ func Mount(ctx context.Context, basePaths []string, upperdirPath, workdirPath, t
options = append(options, "workdir="+workdirPath)
}
if err := osMkdirAll(target, 0755); err != nil {
return errors.Wrapf(err, "failed to create directory for container root filesystem %s", target)
return fmt.Errorf("failed to create directory for container root filesystem %s: %w", target, err)
}
defer func() {
if err != nil {
@ -147,7 +148,7 @@ func Mount(ctx context.Context, basePaths []string, upperdirPath, workdirPath, t
flags |= unix.MS_RDONLY
}
if err := unixMount("overlay", target, "overlay", flags, strings.Join(options, ",")); err != nil {
return errors.Wrapf(err, "failed to mount overlayfs at %s", target)
return fmt.Errorf("failed to mount overlayfs at %s: %w", target, err)
}
return nil
}

Просмотреть файл

@ -11,7 +11,6 @@ import (
"github.com/Microsoft/hcsshim/internal/guest/transport"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"golang.org/x/sys/unix"
)
@ -40,7 +39,7 @@ func Mount(ctx context.Context, vsock transport.Transport, target, share string,
trace.Int64Attribute("port", int64(port)),
trace.BoolAttribute("readonly", readonly))
if err := osMkdirAll(target, 0700); err != nil {
if err := osMkdirAll(target, 0o700); err != nil {
return err
}
defer func() {
@ -50,12 +49,12 @@ func Mount(ctx context.Context, vsock transport.Transport, target, share string,
}()
conn, err := vsock.Dial(port)
if err != nil {
return errors.Wrapf(err, "could not connect to plan9 server for %s", target)
return fmt.Errorf("could not connect to plan9 server for %s: %w", target, err)
}
f, err := conn.File()
conn.Close()
if err != nil {
return errors.Wrapf(err, "could not get file for plan9 connection for %s", target)
return fmt.Errorf("could not get file for plan9 connection for %s: %w", target, err)
}
defer f.Close()
@ -72,14 +71,14 @@ func Mount(ctx context.Context, vsock transport.Transport, target, share string,
// set socket options to maximize bandwidth
err = syscall.SetsockoptInt(int(f.Fd()), syscall.SOL_SOCKET, syscall.SO_RCVBUF, packetPayloadBytes)
if err != nil {
return errors.Wrapf(err, "failed to set sock option syscall.SO_RCVBUF to %v on fd %v", packetPayloadBytes, f.Fd())
return fmt.Errorf("failed to set sock option syscall.SO_RCVBUF to %v on fd %v: %w", packetPayloadBytes, f.Fd(), err)
}
err = syscall.SetsockoptInt(int(f.Fd()), syscall.SOL_SOCKET, syscall.SO_SNDBUF, packetPayloadBytes)
if err != nil {
return errors.Wrapf(err, "failed to set sock option syscall.SO_SNDBUF to %v on fd %v", packetPayloadBytes, f.Fd())
return fmt.Errorf("failed to set sock option syscall.SO_SNDBUF to %v on fd %v: %w", packetPayloadBytes, f.Fd(), err)
}
if err := unixMount(target, target, "9p", mountOptions, data); err != nil {
return errors.Wrapf(err, "failed to mount directory for mapped directory %s", target)
return fmt.Errorf("failed to mount directory for mapped directory %s: %w", target, err)
}
return nil
}

Просмотреть файл

@ -8,7 +8,6 @@ import (
"fmt"
"os"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"golang.org/x/sys/unix"
@ -38,7 +37,7 @@ const (
// mount mounts source to target via unix.Mount
func mount(ctx context.Context, source, target string) (err error) {
if err := osMkdirAll(target, 0700); err != nil {
if err := osMkdirAll(target, 0o700); err != nil {
return err
}
defer func() {
@ -51,7 +50,7 @@ func mount(ctx context.Context, source, target string) (err error) {
flags := uintptr(unix.MS_RDONLY)
if err := unixMount(source, target, "ext4", flags, "noload"); err != nil {
return errors.Wrapf(err, "failed to mount %s onto %s", source, target)
return fmt.Errorf("failed to mount %s onto %s: %w", source, target, err)
}
return nil
}
@ -141,7 +140,7 @@ func Unmount(
trace.StringAttribute("target", target))
if err := storage.UnmountPath(ctx, target, true); err != nil {
return errors.Wrapf(err, "failed to unmount target: %s", target)
return fmt.Errorf("failed to unmount target %s: %w", target, err)
}
if verityInfo != nil {

Просмотреть файл

@ -5,11 +5,11 @@ package pmem
import (
"context"
"errors"
"fmt"
"os"
"testing"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"github.com/Microsoft/hcsshim/internal/protocol/guestresource"
@ -33,7 +33,7 @@ func Test_Mount_Mkdir_Fails_Error(t *testing.T) {
return expectedErr
}
err := Mount(context.Background(), 0, "", nil, nil)
if errors.Cause(err) != expectedErr { //nolint:errorlint
if !errors.Is(err, expectedErr) {
t.Fatalf("expected err: %v, got: %v", expectedErr, err)
}
}
@ -70,8 +70,8 @@ func Test_Mount_Mkdir_ExpectedPerm(t *testing.T) {
target := "/fake/path"
osMkdirAll = func(path string, perm os.FileMode) error {
if perm != os.FileMode(0700) {
t.Errorf("expected perm: %v, got: %v", os.FileMode(0700), perm)
if perm != os.FileMode(0o700) {
t.Errorf("expected perm: %v, got: %v", os.FileMode(0o700), perm)
return errors.New("unexpected perm")
}
return nil
@ -108,7 +108,7 @@ func Test_Mount_Calls_RemoveAll_OnMountFailure(t *testing.T) {
return expectedErr
}
err := Mount(context.Background(), 0, target, nil, nil)
if errors.Cause(err) != expectedErr { //nolint:errorlint
if !errors.Is(err, expectedErr) {
t.Fatalf("expected err: %v, got: %v", expectedErr, err)
}
if !removeAllCalled {

Просмотреть файл

@ -5,6 +5,7 @@ package scsi
import (
"context"
"errors"
"fmt"
"io/fs"
"os"
@ -14,7 +15,6 @@ import (
"strings"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"golang.org/x/sys/unix"
@ -134,7 +134,8 @@ func Mount(
target string,
readonly bool,
options []string,
config *Config) (err error) {
config *Config,
) (err error) {
spnCtx, span := oc.StartSpan(ctx, "scsi::Mount")
defer span.End()
defer func() { oc.SetSpanStatus(span, err) }()
@ -170,7 +171,7 @@ func Mount(
// create and symlink block device mount target
if config.BlockDev {
parent := filepath.Dir(target)
if err := osMkdirAll(parent, 0700); err != nil {
if err := osMkdirAll(parent, 0o700); err != nil {
return err
}
log.G(ctx).WithFields(logrus.Fields{
@ -180,7 +181,7 @@ func Mount(
return osSymlink(source, target)
}
if err := osMkdirAll(target, 0700); err != nil {
if err := osMkdirAll(target, 0o700); err != nil {
return err
}
defer func() {
@ -308,7 +309,7 @@ func Unmount(
// unmount target
if err := storageUnmountPath(ctx, target, true); err != nil {
return errors.Wrapf(err, "unmount failed: %s", target)
return fmt.Errorf("unmount %q failed: %w", target, err)
}
if config.VerityInfo != nil {
@ -366,7 +367,7 @@ func GetDevicePath(ctx context.Context, controller, lun uint8, partition uint64)
}
if len(deviceNames) > 1 {
return "", errors.Errorf("more than one block device could match SCSI ID \"%s\"", scsiID)
return "", fmt.Errorf("more than one block device could match SCSI ID %q", scsiID)
}
deviceName := deviceNames[0].Name()
@ -442,7 +443,7 @@ func UnplugDevice(ctx context.Context, controller, lun uint8) (err error) {
trace.Int64Attribute("lun", int64(lun)))
scsiID := fmt.Sprintf("%d:0:0:%d", controller, lun)
f, err := os.OpenFile(filepath.Join(scsiDevicesPath, scsiID, "delete"), os.O_WRONLY, 0644)
f, err := os.OpenFile(filepath.Join(scsiDevicesPath, scsiID, "delete"), os.O_WRONLY, 0o644)
if err != nil {
if os.IsNotExist(err) {
return nil

Просмотреть файл

@ -8,8 +8,6 @@ import (
"fmt"
"path/filepath"
"time"
"github.com/pkg/errors"
)
// export this variable so it can be mocked to aid in testing for consuming packages
@ -26,13 +24,16 @@ func WaitForFileMatchingPattern(ctx context.Context, pattern string) (string, er
if len(files) == 0 {
select {
case <-ctx.Done():
return "", errors.Wrapf(ctx.Err(), "timed out waiting for file matching pattern %s to exist", pattern)
if err := ctx.Err(); err != nil {
return "", fmt.Errorf("timed out waiting for file matching pattern %s to exist: %w", pattern, err)
}
return "", nil
default:
time.Sleep(time.Millisecond * 10)
continue
}
} else if len(files) > 1 {
return "", fmt.Errorf("more than one file could exist for pattern \"%s\"", pattern)
return "", fmt.Errorf("more than one file could exist for pattern %q", pattern)
}
return files[0], nil
}

Просмотреть файл

@ -4,13 +4,12 @@
package transport
import (
gerrors "errors"
"errors"
"fmt"
"syscall"
"time"
"github.com/linuxkit/virtsock/pkg/vsock"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -36,12 +35,11 @@ func (t *VsockTransport) Dial(port uint32) (Connection, error) {
return conn, nil
}
// If the error was ETIMEDOUT retry, otherwise fail.
var errno syscall.Errno
if gerrors.As(err, &errno) && errno == syscall.ETIMEDOUT {
if errors.Is(err, syscall.ETIMEDOUT) {
time.Sleep(100 * time.Millisecond)
continue
} else {
return nil, errors.Wrapf(err, "vsock Dial port (%d) failed", port)
return nil, fmt.Errorf("vsock Dial port (%d) failed: %w", port, err)
}
}
return nil, fmt.Errorf("failed connecting the VsockConnection: can't connect after 10 attempts")

Просмотреть файл

@ -9,7 +9,7 @@
package hcsschema
// ComPort specifies the named pipe that will be used for the port, with empty string indicating a disconnected port.
// ComPort specifies the named pipe that will be used for the port, with empty string indicating a disconnected port.
type ComPort struct {
NamedPipe string `json:"NamedPipe,omitempty"`

Просмотреть файл

@ -9,7 +9,7 @@
package hcsschema
// memory usage as viewed from within the container
// memory usage as viewed from within the container
type ContainerMemoryInformation struct {
TotalPhysicalBytes int32 `json:"TotalPhysicalBytes,omitempty"`

Просмотреть файл

@ -9,7 +9,7 @@
package hcsschema
// Information about the guest.
// Information about the guest.
type GuestConnectionInfo struct {
// Each schema version x.y stands for the range of versions a.b where a==x and b<=y. This list comes from the SupportedSchemaVersions field in GcsCapabilities.

Просмотреть файл

@ -9,7 +9,7 @@
package hcsschema
// HvSocket configuration for a VM
// HvSocket configuration for a VM
type HvSocket2 struct {
HvSocketConfig *HvSocketSystemConfig `json:"HvSocketConfig,omitempty"`
}

Просмотреть файл

@ -9,8 +9,8 @@
package hcsschema
// This class defines address settings applied to a VM
// by the GCS every time a VM starts or restores.
// This class defines address settings applied to a VM
// by the GCS every time a VM starts or restores.
type HvSocketAddress struct {
LocalAddress string `json:"LocalAddress,omitempty"`
ParentAddress string `json:"ParentAddress,omitempty"`

Просмотреть файл

@ -9,7 +9,7 @@
package hcsschema
// This is the HCS Schema version of the HvSocket configuration. The VMWP version is located in Config.Devices.IC in V1.
// This is the HCS Schema version of the HvSocket configuration. The VMWP version is located in Config.Devices.IC in V1.
type HvSocketSystemConfig struct {
// SDDL string that HvSocket will check before allowing a host process to bind to an unlisted service for this specific container/VM (not wildcard binds).

Просмотреть файл

@ -9,7 +9,7 @@
package hcsschema
// Memory runtime statistics
// Memory runtime statistics
type MemoryStats struct {
MemoryUsageCommitBytes uint64 `json:"MemoryUsageCommitBytes,omitempty"`

Просмотреть файл

@ -12,10 +12,10 @@
package hcsschema
type NumaSetting struct {
VirtualNodeNumber uint32 `json:"VirtualNodeNumber,omitempty"`
PhysicalNodeNumber uint32 `json:"PhysicalNodeNumber,omitempty"`
VirtualSocketNumber uint32 `json:"VirtualSocketNumber,omitempty"`
CountOfProcessors uint32 `json:"CountOfProcessors,omitempty"`
CountOfMemoryBlocks uint64 `json:"CountOfMemoryBlocks,omitempty"`
MemoryBackingType MemoryBackingType `json:"MemoryBackingType,omitempty"`
VirtualNodeNumber uint32 `json:"VirtualNodeNumber,omitempty"`
PhysicalNodeNumber uint32 `json:"PhysicalNodeNumber,omitempty"`
VirtualSocketNumber uint32 `json:"VirtualSocketNumber,omitempty"`
CountOfProcessors uint32 `json:"CountOfProcessors,omitempty"`
CountOfMemoryBlocks uint64 `json:"CountOfMemoryBlocks,omitempty"`
MemoryBackingType MemoryBackingType `json:"MemoryBackingType,omitempty"`
}

Просмотреть файл

@ -9,7 +9,7 @@
package hcsschema
// Notification data that is indicated to components running in the Virtual Machine.
// Notification data that is indicated to components running in the Virtual Machine.
type PauseNotification struct {
Reason string `json:"Reason,omitempty"`
}

Просмотреть файл

@ -9,7 +9,7 @@
package hcsschema
// Options for HcsPauseComputeSystem
// Options for HcsPauseComputeSystem
type PauseOptions struct {
SuspensionLevel string `json:"SuspensionLevel,omitempty"`

Просмотреть файл

@ -13,7 +13,7 @@ import (
"time"
)
// Information about a process running in a container
// Information about a process running in a container
type ProcessDetails struct {
ProcessId int32 `json:"ProcessId,omitempty"`

Просмотреть файл

@ -9,7 +9,7 @@
package hcsschema
// CPU runtime statistics
// CPU runtime statistics
type ProcessorStats struct {
TotalRuntime100ns uint64 `json:"TotalRuntime100ns,omitempty"`

Просмотреть файл

@ -9,7 +9,7 @@
package hcsschema
// By default the basic properties will be returned. This query provides a way to request specific properties.
// By default the basic properties will be returned. This query provides a way to request specific properties.
type PropertyQuery struct {
PropertyTypes []PropertyType `json:"PropertyTypes,omitempty"`
}

Просмотреть файл

@ -9,7 +9,7 @@
package hcsschema
// Silo job information
// Silo job information
type SiloProperties struct {
Enabled bool `json:"Enabled,omitempty"`

Просмотреть файл

@ -13,7 +13,7 @@ import (
"time"
)
// Runtime statistics for a container
// Runtime statistics for a container
type Statistics struct {
Timestamp time.Time `json:"Timestamp,omitempty"`

Просмотреть файл

@ -9,7 +9,7 @@
package hcsschema
// Storage runtime statistics
// Storage runtime statistics
type StorageStats struct {
ReadCountNormalized uint64 `json:"ReadCountNormalized,omitempty"`

Просмотреть файл

@ -12,7 +12,7 @@
package hcsschema
type Topology struct {
Memory *VirtualMachineMemory `json:"Memory,omitempty"`
Processor *VirtualMachineProcessor `json:"Processor,omitempty"`
Numa *Numa `json:"Numa,omitempty"`
Memory *VirtualMachineMemory `json:"Memory,omitempty"`
Processor *VirtualMachineProcessor `json:"Processor,omitempty"`
Numa *Numa `json:"Numa,omitempty"`
}

Просмотреть файл

@ -15,15 +15,15 @@ package hcsschema
type VirtualMachine struct {
Version *Version `json:"Version,omitempty"`
// When set to true, the virtual machine will treat a reset as a stop, releasing resources and cleaning up state.
StopOnReset bool `json:"StopOnReset,omitempty"`
Chipset *Chipset `json:"Chipset,omitempty"`
ComputeTopology *Topology `json:"ComputeTopology,omitempty"`
Devices *Devices `json:"Devices,omitempty"`
GuestState *GuestState `json:"GuestState,omitempty"`
RestoreState *RestoreState `json:"RestoreState,omitempty"`
RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"`
StorageQoS *StorageQoS `json:"StorageQoS,omitempty"`
DebugOptions *DebugOptions `json:"DebugOptions,omitempty"`
GuestConnection *GuestConnection `json:"GuestConnection,omitempty"`
SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"`
StopOnReset bool `json:"StopOnReset,omitempty"`
Chipset *Chipset `json:"Chipset,omitempty"`
ComputeTopology *Topology `json:"ComputeTopology,omitempty"`
Devices *Devices `json:"Devices,omitempty"`
GuestState *GuestState `json:"GuestState,omitempty"`
RestoreState *RestoreState `json:"RestoreState,omitempty"`
RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"`
StorageQoS *StorageQoS `json:"StorageQoS,omitempty"`
DebugOptions *DebugOptions `json:"DebugOptions,omitempty"`
GuestConnection *GuestConnection `json:"GuestConnection,omitempty"`
SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"`
}

Просмотреть файл

@ -15,7 +15,7 @@ type VirtualMachineMemory struct {
SizeInMB uint64 `json:"SizeInMB,omitempty"`
Backing *MemoryBackingType `json:"Backing,omitempty"`
// If enabled, then the VM's memory is backed by the Windows pagefile rather than physically backed, statically allocated memory.
AllowOvercommit bool `json:"AllowOvercommit,omitempty"`
AllowOvercommit bool `json:"AllowOvercommit,omitempty"`
// If enabled, then the memory hot hint feature is exposed to the VM, allowing it to prefetch pages into its working set. (if supported by the guest operating system).
EnableHotHint bool `json:"EnableHotHint,omitempty"`
// If enabled, then the memory cold hint feature is exposed to the VM, allowing it to trim zeroed pages from its working set (if supported by the guest operating system).
@ -27,7 +27,7 @@ type VirtualMachineMemory struct {
// Low MMIO region allocated below 4GB
LowMMIOGapInMB uint64 `json:"LowMmioGapInMB,omitempty"`
// High MMIO region allocated above 4GB (base and size)
HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"`
HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"`
SlitType *VirtualSlitType `json:"SlitType,omitempty"`
HighMMIOBaseInMB uint64 `json:"HighMmioBaseInMB,omitempty"`
HighMMIOGapInMB uint64 `json:"HighMmioGapInMB,omitempty"`
SlitType *VirtualSlitType `json:"SlitType,omitempty"`
}

Просмотреть файл

@ -12,10 +12,10 @@
package hcsschema
type VirtualMachineProcessor struct {
Count uint32 `json:"Count,omitempty"`
Limit uint64 `json:"Limit,omitempty"`
Weight uint64 `json:"Weight,omitempty"`
Reservation uint64 `json:"Reservation,omitempty"`
CpuGroup *CpuGroup `json:"CpuGroup,omitempty"`
NumaProcessorsSettings *NumaProcessors `json:"NumaProcessorsSettings,omitempty"`
Count uint32 `json:"Count,omitempty"`
Limit uint64 `json:"Limit,omitempty"`
Weight uint64 `json:"Weight,omitempty"`
Reservation uint64 `json:"Reservation,omitempty"`
CpuGroup *CpuGroup `json:"CpuGroup,omitempty"`
NumaProcessorsSettings *NumaProcessors `json:"NumaProcessorsSettings,omitempty"`
}

Просмотреть файл

@ -12,6 +12,6 @@ package hcsschema
// TODO: PropagateNumaAffinity is pre-release/experimental field in schema 2.11. Need to add build number
// docs when a public build with this is out.
type VirtualPciDevice struct {
Functions []VirtualPciFunction `json:",omitempty"`
PropagateNumaAffinity *bool `json:"PropagateNumaAffinity,omitempty"`
Functions []VirtualPciFunction `json:",omitempty"`
PropagateNumaAffinity *bool `json:"PropagateNumaAffinity,omitempty"`
}

Просмотреть файл

@ -4,13 +4,13 @@ package hcs
import (
"context"
"fmt"
"io"
"syscall"
"github.com/Microsoft/go-winio"
diskutil "github.com/Microsoft/go-winio/vhd"
"github.com/Microsoft/hcsshim/computestorage"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
)
@ -42,22 +42,22 @@ func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) {
// CreateNTFSVHD creates a VHD formatted with NTFS of size `sizeGB` at the given `vhdPath`.
func CreateNTFSVHD(ctx context.Context, vhdPath string, sizeGB uint32) (err error) {
if err := diskutil.CreateVhdx(vhdPath, sizeGB, 1); err != nil {
return errors.Wrap(err, "failed to create VHD")
return fmt.Errorf("failed to create VHD: %w", err)
}
vhd, err := diskutil.OpenVirtualDisk(vhdPath, diskutil.VirtualDiskAccessNone, diskutil.OpenVirtualDiskFlagNone)
if err != nil {
return errors.Wrap(err, "failed to open VHD")
return fmt.Errorf("failed to open VHD: %w", err)
}
defer func() {
err2 := windows.CloseHandle(windows.Handle(vhd))
if err == nil {
err = errors.Wrap(err2, "failed to close VHD")
err = fmt.Errorf("failed to close VHD: %w", err2)
}
}()
if err := computestorage.FormatWritableLayerVhd(ctx, windows.Handle(vhd)); err != nil {
return errors.Wrap(err, "failed to format VHD")
return fmt.Errorf("failed to format VHD: %w", err)
}
return nil

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше